1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/llite/llite_lib.c
33  *
34  * Lustre Light Super operations
35  */
36
37 #define DEBUG_SUBSYSTEM S_LLITE
38
39 #include <linux/cpu.h>
40 #include <linux/module.h>
41 #include <linux/random.h>
42 #include <linux/statfs.h>
43 #include <linux/time.h>
44 #include <linux/types.h>
45 #include <libcfs/linux/linux-uuid.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48 #include <linux/user_namespace.h>
49 #include <linux/delay.h>
50 #include <linux/uidgid.h>
51 #include <linux/security.h>
52 #include <linux/fs_struct.h>
53
54 #ifndef HAVE_CPUS_READ_LOCK
55 #include <libcfs/linux/linux-cpu.h>
56 #endif
57 #include <uapi/linux/lustre/lustre_ioctl.h>
58 #ifdef HAVE_UAPI_LINUX_MOUNT_H
59 #include <uapi/linux/mount.h>
60 #endif
61
62 #include <lustre_ha.h>
63 #include <lustre_dlm.h>
64 #include <lprocfs_status.h>
65 #include <lustre_disk.h>
66 #include <uapi/linux/lustre/lustre_param.h>
67 #include <lustre_log.h>
68 #include <cl_object.h>
69 #include <obd_cksum.h>
70 #include "llite_internal.h"
71
72 struct kmem_cache *ll_file_data_slab;
73
74 #ifndef log2
75 #define log2(n) ffz(~(n))
76 #endif
77
78 /**
79  * If only one core is visible to Lustre, async readahead will be
80  * disabled. To avoid massive oversubscription, we use half of the
81  * active cores as the default maximum number of async readahead
82  * requests.
83  */
84 static inline unsigned int ll_get_ra_async_max_active(void)
85 {
86         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
87 }
88
89 static struct ll_sb_info *ll_init_sbi(void)
90 {
91         struct ll_sb_info *sbi = NULL;
92         unsigned long pages;
93         unsigned long lru_page_max;
94         struct sysinfo si;
95         int rc;
96         int i;
97
98         ENTRY;
99
100         OBD_ALLOC_PTR(sbi);
101         if (sbi == NULL)
102                 RETURN(ERR_PTR(-ENOMEM));
103
104         rc = pcc_super_init(&sbi->ll_pcc_super);
105         if (rc < 0)
106                 GOTO(out_sbi, rc);
107
108         spin_lock_init(&sbi->ll_lock);
109         mutex_init(&sbi->ll_lco.lco_lock);
110         spin_lock_init(&sbi->ll_pp_extent_lock);
111         spin_lock_init(&sbi->ll_process_lock);
112         sbi->ll_rw_stats_on = 0;
113         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
114
115         si_meminfo(&si);
116         pages = si.totalram - si.totalhigh;
117         lru_page_max = pages / 2;
118
119         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
120         sbi->ll_ra_info.ll_readahead_wq =
121                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
122                                        0, CFS_CPT_ANY,
123                                        sbi->ll_ra_info.ra_async_max_active);
124         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
125                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
126
127         /* initialize ll_cache data */
128         sbi->ll_cache = cl_cache_init(lru_page_max);
129         if (sbi->ll_cache == NULL)
130                 GOTO(out_destroy_ra, rc = -ENOMEM);
131
132         /* initialize foreign symlink prefix path */
133         OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
134         if (sbi->ll_foreign_symlink_prefix == NULL)
135                 GOTO(out_destroy_ra, rc = -ENOMEM);
136         memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
137         sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
138
139         /* initialize foreign symlink upcall path, none by default */
140         OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
141         if (sbi->ll_foreign_symlink_upcall == NULL)
142                 GOTO(out_destroy_ra, rc = -ENOMEM);
143         memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
144         sbi->ll_foreign_symlink_upcall_items = NULL;
145         sbi->ll_foreign_symlink_upcall_nb_items = 0;
146         init_rwsem(&sbi->ll_foreign_symlink_sem);
147         /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
148          * not enabled by default
149          */
150
151         sbi->ll_ra_info.ra_max_pages =
152                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
153         sbi->ll_ra_info.ra_max_pages_per_file =
154                 min(sbi->ll_ra_info.ra_max_pages / 4,
155                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
156         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
157                                 sbi->ll_ra_info.ra_max_pages_per_file;
158         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
159         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
160         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
161
162         sbi->ll_flags |= LL_SBI_VERBOSE;
163 #ifdef ENABLE_CHECKSUM
164         sbi->ll_flags |= LL_SBI_CHECKSUM;
165 #endif
166 #ifdef ENABLE_FLOCK
167         sbi->ll_flags |= LL_SBI_FLOCK;
168 #endif
169
170 #ifdef HAVE_LRU_RESIZE_SUPPORT
171         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
172 #endif
173         sbi->ll_flags |= LL_SBI_LAZYSTATFS;
174
175         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
176                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
177                                pp_r_hist.oh_lock);
178                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
179                                pp_w_hist.oh_lock);
180         }
181
182         /* metadata statahead is enabled by default */
183         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
184         sbi->ll_sa_max = LL_SA_RPC_DEF;
185         atomic_set(&sbi->ll_sa_total, 0);
186         atomic_set(&sbi->ll_sa_wrong, 0);
187         atomic_set(&sbi->ll_sa_running, 0);
188         atomic_set(&sbi->ll_agl_total, 0);
189         sbi->ll_flags |= LL_SBI_AGL_ENABLED;
190         sbi->ll_flags |= LL_SBI_FAST_READ;
191         sbi->ll_flags |= LL_SBI_TINY_WRITE;
192         ll_sbi_set_encrypt(sbi, true);
193
194         /* root squash */
195         sbi->ll_squash.rsi_uid = 0;
196         sbi->ll_squash.rsi_gid = 0;
197         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
198         spin_lock_init(&sbi->ll_squash.rsi_lock);
199
200         /* Per-filesystem file heat */
201         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
202         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
203         RETURN(sbi);
204 out_destroy_ra:
205         if (sbi->ll_foreign_symlink_prefix)
206                 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
207         if (sbi->ll_cache) {
208                 cl_cache_decref(sbi->ll_cache);
209                 sbi->ll_cache = NULL;
210         }
211         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
212 out_pcc:
213         pcc_super_fini(&sbi->ll_pcc_super);
214 out_sbi:
215         OBD_FREE_PTR(sbi);
216         RETURN(ERR_PTR(rc));
217 }
218
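/*
 * Free a struct ll_sb_info set up by ll_init_sbi(): release the nosquash
 * NID list, the readahead workqueue, the client LRU cache reference, the
 * foreign symlink state and the PCC state before freeing the sbi itself.
 */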
219 static void ll_free_sbi(struct super_block *sb)
220 {
221         struct ll_sb_info *sbi = ll_s2sbi(sb);
222         ENTRY;
223
224         if (sbi != NULL) {
225                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
226                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
227                 if (sbi->ll_ra_info.ll_readahead_wq)
228                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
229                 if (sbi->ll_cache != NULL) {
230                         cl_cache_decref(sbi->ll_cache);
231                         sbi->ll_cache = NULL;
232                 }
233                 if (sbi->ll_foreign_symlink_prefix) {
234                         OBD_FREE(sbi->ll_foreign_symlink_prefix,
235                                  sbi->ll_foreign_symlink_prefix_size);
236                         sbi->ll_foreign_symlink_prefix = NULL;
237                 }
238                 if (sbi->ll_foreign_symlink_upcall) {
239                         OBD_FREE(sbi->ll_foreign_symlink_upcall,
240                                  strlen(sbi->ll_foreign_symlink_upcall) +
241                                        1);
242                         sbi->ll_foreign_symlink_upcall = NULL;
243                 }
244                 if (sbi->ll_foreign_symlink_upcall_items) {
245                         int i;
246                         int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
247                         struct ll_foreign_symlink_upcall_item *items =
248                                 sbi->ll_foreign_symlink_upcall_items;
249
250                         for (i = 0; i < nb_items; i++)
251                                 if (items[i].type == STRING_TYPE)
252                                         OBD_FREE(items[i].string,
253                                                        items[i].size);
254
255                         OBD_FREE_LARGE(items, nb_items *
256                                 sizeof(struct ll_foreign_symlink_upcall_item));
257                         sbi->ll_foreign_symlink_upcall_items = NULL;
258                 }
259                 pcc_super_fini(&sbi->ll_pcc_super);
260                 OBD_FREE(sbi, sizeof(*sbi));
261         }
262         EXIT;
263 }
264
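/*
 * Connect this client to its metadata (md) and data (dt) obd devices,
 * negotiate connect flags with each, fetch the root FID and build the
 * root inode and dentry for the superblock.
 */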
265 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
266 {
267         struct inode *root = NULL;
268         struct ll_sb_info *sbi = ll_s2sbi(sb);
269         struct obd_statfs *osfs = NULL;
270         struct ptlrpc_request *request = NULL;
271         struct obd_connect_data *data = NULL;
272         struct obd_uuid *uuid;
273         struct md_op_data *op_data;
274         struct lustre_md lmd;
275         u64 valid;
276         int size, err, checksum;
277
278         ENTRY;
279         sbi->ll_md_obd = class_name2obd(md);
280         if (!sbi->ll_md_obd) {
281                 CERROR("MD %s: not setup or attached\n", md);
282                 RETURN(-EINVAL);
283         }
284
285         OBD_ALLOC_PTR(data);
286         if (data == NULL)
287                 RETURN(-ENOMEM);
288
289         OBD_ALLOC_PTR(osfs);
290         if (osfs == NULL) {
291                 OBD_FREE_PTR(data);
292                 RETURN(-ENOMEM);
293         }
294
295         /* pass the client page size via ocd_grant_blkbits; the server should
296          * report back its backend blocksize for grant calculation purposes */
297         data->ocd_grant_blkbits = PAGE_SHIFT;
298
299         /* indicate MDT features supported by this client */
300         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
301                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
302                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
303                                   OBD_CONNECT_SRVLOCK  | OBD_CONNECT_TRUNCLOCK |
304                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
305                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
306                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
307                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
308                                   OBD_CONNECT_64BITHASH |
309                                   OBD_CONNECT_EINPROGRESS |
310                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
311                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS |
312                                   OBD_CONNECT_MAX_EASIZE |
313                                   OBD_CONNECT_FLOCK_DEAD |
314                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
315                                   OBD_CONNECT_OPEN_BY_FID |
316                                   OBD_CONNECT_DIR_STRIPE |
317                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
318                                   OBD_CONNECT_SUBTREE |
319                                   OBD_CONNECT_MULTIMODRPCS |
320                                   OBD_CONNECT_GRANT_PARAM |
321                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
322
323         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
324                                    OBD_CONNECT2_SUM_STATFS |
325                                    OBD_CONNECT2_OVERSTRIPING |
326                                    OBD_CONNECT2_FLR |
327                                    OBD_CONNECT2_LOCK_CONVERT |
328                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
329                                    OBD_CONNECT2_INC_XID |
330                                    OBD_CONNECT2_LSOM |
331                                    OBD_CONNECT2_ASYNC_DISCARD |
332                                    OBD_CONNECT2_PCC |
333                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
334                                    OBD_CONNECT2_GETATTR_PFID |
335                                    OBD_CONNECT2_DOM_LVB;
336
337 #ifdef HAVE_LRU_RESIZE_SUPPORT
338         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
339                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
340 #endif
341         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
342
343         data->ocd_cksum_types = obd_cksum_types_supported_client();
344
345         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
346                 /* flag the MDC connection as lightweight; only used for
347                  * test purposes, use with care */
348                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
349
350         data->ocd_ibits_known = MDS_INODELOCK_FULL;
351         data->ocd_version = LUSTRE_VERSION_CODE;
352
353         if (sb->s_flags & SB_RDONLY)
354                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
355         if (sbi->ll_flags & LL_SBI_USER_XATTR)
356                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
357
358 #ifdef SB_NOSEC
359         /* Setting this indicates we correctly support S_NOSEC (See kernel
360          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
361          */
362         sb->s_flags |= SB_NOSEC;
363 #endif
364
365         if (sbi->ll_flags & LL_SBI_FLOCK)
366                 sbi->ll_fop = &ll_file_operations_flock;
367         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
368                 sbi->ll_fop = &ll_file_operations;
369         else
370                 sbi->ll_fop = &ll_file_operations_noflock;
371
372         /* always ping even if server suppress_pings */
373         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
374                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
375
376         obd_connect_set_secctx(data);
377         if (ll_sbi_has_encrypt(sbi))
378                 obd_connect_set_enc(data);
379
380 #if defined(CONFIG_SECURITY)
381         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
382 #endif
383
384         data->ocd_brw_size = MD_MAX_BRW_SIZE;
385
386         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
387                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
388         if (err == -EBUSY) {
389                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
390                                    "recovery, of which this client is not a "
391                                    "part. Please wait for recovery to complete,"
392                                    " abort, or time out.\n", md);
393                 GOTO(out, err);
394         } else if (err) {
395                 CERROR("cannot connect to %s: rc = %d\n", md, err);
396                 GOTO(out, err);
397         }
398
399         sbi->ll_md_exp->exp_connect_data = *data;
400
401         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
402                            LUSTRE_SEQ_METADATA);
403         if (err) {
404                 CERROR("%s: Can't init metadata layer FID infrastructure, "
405                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
406                 GOTO(out_md, err);
407         }
408
409         /* For mount, we only need fs info from MDT0; in DNE this also
410          * ensures the client can be mounted as long as MDT0 is
411          * available */
412         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
413                         ktime_get_seconds() - sbi->ll_statfs_max_age,
414                         OBD_STATFS_FOR_MDT0);
415         if (err)
416                 GOTO(out_md_fid, err);
417
418         /* This needs to be after statfs to ensure connect has finished.
419          * Note that "data" does NOT contain the valid connect reply.
420          * If connecting to a 1.8 server there will be no LMV device, so
421          * we can access the MDC export directly and exp_connect_flags will
422          * be non-zero, but if accessing an upgraded 2.1 server it will
423          * have the correct flags filled in.
424          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
425         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
426         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
427             valid != CLIENT_CONNECT_MDT_REQD) {
428                 char *buf;
429
430                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
431                 obd_connect_flags2str(buf, PAGE_SIZE,
432                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
433                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
434                                    "feature(s) needed for correct operation "
435                                    "of this client (%s). Please upgrade "
436                                    "server or downgrade client.\n",
437                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
438                 OBD_FREE(buf, PAGE_SIZE);
439                 GOTO(out_md_fid, err = -EPROTO);
440         }
441
442         size = sizeof(*data);
443         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
444                            KEY_CONN_DATA,  &size, data);
445         if (err) {
446                 CERROR("%s: Get connect data failed: rc = %d\n",
447                        sbi->ll_md_exp->exp_obd->obd_name, err);
448                 GOTO(out_md_fid, err);
449         }
450
451         LASSERT(osfs->os_bsize);
452         sb->s_blocksize = osfs->os_bsize;
453         sb->s_blocksize_bits = log2(osfs->os_bsize);
454         sb->s_magic = LL_SUPER_MAGIC;
455         sb->s_maxbytes = MAX_LFS_FILESIZE;
456         sbi->ll_namelen = osfs->os_namelen;
457         sbi->ll_mnt.mnt = current->fs->root.mnt;
458
459         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
460             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
461                 LCONSOLE_INFO("Disabling user_xattr feature because "
462                               "it is not supported on the server\n");
463                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
464         }
465
466         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
467 #ifdef SB_POSIXACL
468                 sb->s_flags |= SB_POSIXACL;
469 #endif
470                 sbi->ll_flags |= LL_SBI_ACL;
471         } else {
472                 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
473 #ifdef SB_POSIXACL
474                 sb->s_flags &= ~SB_POSIXACL;
475 #endif
476                 sbi->ll_flags &= ~LL_SBI_ACL;
477         }
478
479         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
480                 sbi->ll_flags |= LL_SBI_64BIT_HASH;
481
482         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
483                 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
484
485         if (obd_connect_has_secctx(data))
486                 sbi->ll_flags |= LL_SBI_FILE_SECCTX;
487
488         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
489                 if (ll_sbi_has_test_dummy_encryption(sbi))
490                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
491                                       sbi->ll_fsname,
492                                       sbi->ll_md_exp->exp_obd->obd_name);
493                 ll_sbi_set_encrypt(sbi, false);
494         }
495
496         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
497                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
498                         LCONSOLE_INFO("%s: disabling xattr cache due to "
499                                       "unknown maximum xattr size.\n", dt);
500                 } else if (!sbi->ll_xattr_cache_set) {
501                         /* If xattr_cache was already set (whether 0 or 1)
502                          * during llog processing, it won't be enabled here. */
503                         sbi->ll_flags |= LL_SBI_XATTR_CACHE;
504                         sbi->ll_xattr_cache_enabled = 1;
505                 }
506         }
507
508         sbi->ll_dt_obd = class_name2obd(dt);
509         if (!sbi->ll_dt_obd) {
510                 CERROR("DT %s: not setup or attached\n", dt);
511                 GOTO(out_md_fid, err = -ENODEV);
512         }
513
514         /* pass the client page size via ocd_grant_blkbits; the server should
515          * report back its backend blocksize for grant calculation purposes */
516         data->ocd_grant_blkbits = PAGE_SHIFT;
517
518         /* indicate OST features supported by this client */
519         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
520                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
521                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
522                                   OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK |
523                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
524                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
525                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
526                                   OBD_CONNECT_EINPROGRESS |
527                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
528                                   OBD_CONNECT_LAYOUTLOCK |
529                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
530                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
531                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
532         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
533                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK;
534
535         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
536                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
537
538         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
539          * disabled by default, because it can still be enabled on the
540          * fly via /sys. As a consequence, we still need to come to an
541          * agreement on the supported algorithms at connect time
542          */
543         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
544
545         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
546                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
547         else
548                 data->ocd_cksum_types = obd_cksum_types_supported_client();
549
550 #ifdef HAVE_LRU_RESIZE_SUPPORT
551         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
552 #endif
553         /* always ping even if server suppress_pings */
554         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
555                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
556
557         if (ll_sbi_has_encrypt(sbi))
558                 obd_connect_set_enc(data);
559
560         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
561                "ocd_grant: %d\n", data->ocd_connect_flags,
562                data->ocd_version, data->ocd_grant);
563
564         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
565         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
566
567         data->ocd_brw_size = DT_MAX_BRW_SIZE;
568
569         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
570                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
571         if (err == -EBUSY) {
572                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
573                                    "recovery, of which this client is not a "
574                                    "part.  Please wait for recovery to "
575                                    "complete, abort, or time out.\n", dt);
576                 GOTO(out_md, err);
577         } else if (err) {
578                 CERROR("%s: Cannot connect to %s: rc = %d\n",
579                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
580                 GOTO(out_md, err);
581         }
582
583         if (ll_sbi_has_encrypt(sbi) &&
584             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
585                 if (ll_sbi_has_test_dummy_encryption(sbi))
586                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
587                                       sbi->ll_fsname, dt);
588                 ll_sbi_set_encrypt(sbi, false);
589         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
590                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
591         }
592
593         sbi->ll_dt_exp->exp_connect_data = *data;
594
595         /* Don't change value if it was specified in the config log */
596         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
597                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
598                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
599                               (data->ocd_brw_size >> PAGE_SHIFT));
600                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
601                     sbi->ll_ra_info.ra_max_pages_per_file)
602                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
603                                 sbi->ll_ra_info.ra_max_pages_per_file;
604         }
605
606         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
607                            LUSTRE_SEQ_METADATA);
608         if (err) {
609                 CERROR("%s: Can't init data layer FID infrastructure, "
610                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
611                 GOTO(out_dt, err);
612         }
613
614         mutex_lock(&sbi->ll_lco.lco_lock);
615         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
616         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
617         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
618         mutex_unlock(&sbi->ll_lco.lco_lock);
619
620         fid_zero(&sbi->ll_root_fid);
621         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
622                            &sbi->ll_root_fid);
623         if (err) {
624                 CERROR("cannot mds_connect: rc = %d\n", err);
625                 GOTO(out_lock_cn_cb, err);
626         }
627         if (!fid_is_sane(&sbi->ll_root_fid)) {
628                 CERROR("%s: Invalid root fid "DFID" during mount\n",
629                        sbi->ll_md_exp->exp_obd->obd_name,
630                        PFID(&sbi->ll_root_fid));
631                 GOTO(out_lock_cn_cb, err = -EINVAL);
632         }
633         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
634
635         sb->s_op = &lustre_super_operations;
636         sb->s_xattr = ll_xattr_handlers;
637 #if THREAD_SIZE >= 8192 /*b=17630*/
638         sb->s_export_op = &lustre_export_operations;
639 #endif
640 #ifdef HAVE_LUSTRE_CRYPTO
641         llcrypt_set_ops(sb, &lustre_cryptops);
642 #endif
643
644         /* make root inode
645          * XXX: move this to after cbd setup? */
646         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
647         if (sbi->ll_flags & LL_SBI_ACL)
648                 valid |= OBD_MD_FLACL;
649
650         OBD_ALLOC_PTR(op_data);
651         if (op_data == NULL)
652                 GOTO(out_lock_cn_cb, err = -ENOMEM);
653
654         op_data->op_fid1 = sbi->ll_root_fid;
655         op_data->op_mode = 0;
656         op_data->op_valid = valid;
657
658         err = md_getattr(sbi->ll_md_exp, op_data, &request);
659
660         OBD_FREE_PTR(op_data);
661         if (err) {
662                 CERROR("%s: md_getattr failed for root: rc = %d\n",
663                        sbi->ll_md_exp->exp_obd->obd_name, err);
664                 GOTO(out_lock_cn_cb, err);
665         }
666
667         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
668                                sbi->ll_md_exp, &lmd);
669         if (err) {
670                 CERROR("failed to understand root inode md: rc = %d\n", err);
671                 ptlrpc_req_finished(request);
672                 GOTO(out_lock_cn_cb, err);
673         }
674
675         LASSERT(fid_is_sane(&sbi->ll_root_fid));
676         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
677                                             sbi->ll_flags & LL_SBI_32BIT_API),
678                        &lmd);
679         md_free_lustre_md(sbi->ll_md_exp, &lmd);
680         ptlrpc_req_finished(request);
681
682         if (IS_ERR(root)) {
683                 lmd_clear_acl(&lmd);
684                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
685                 root = NULL;
686                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
687                        sbi->ll_fsname, err);
688                 GOTO(out_root, err);
689         }
690
691         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
692         if (sbi->ll_checksum_set) {
693                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
694                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
695                                          sizeof(checksum), &checksum, NULL);
696                 if (err) {
697                         CERROR("%s: Set checksum failed: rc = %d\n",
698                                sbi->ll_dt_exp->exp_obd->obd_name, err);
699                         GOTO(out_root, err);
700                 }
701         }
702         cl_sb_init(sb);
703
704         sb->s_root = d_make_root(root);
705         if (sb->s_root == NULL) {
706                 err = -ENOMEM;
707                 CERROR("%s: can't make root dentry: rc = %d\n",
708                        sbi->ll_fsname, err);
709                 GOTO(out_root, err);
710         }
711
712         sbi->ll_sdev_orig = sb->s_dev;
713
714         /* We set sb->s_dev to the same value on all Lustre clients in order
715          * to support NFS export clustering.  NFSD requires that the FSID be
716          * the same on all clients. */
717         /* s_dev is also used in lt_compare() to compare two filesystems, but
718          * that is only a node-local comparison. */
719         uuid = obd_get_uuid(sbi->ll_md_exp);
720         if (uuid != NULL)
721                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
722
723         if (data != NULL)
724                 OBD_FREE_PTR(data);
725         if (osfs != NULL)
726                 OBD_FREE_PTR(osfs);
727
728         if (sbi->ll_dt_obd) {
729                 err = sysfs_create_link(&sbi->ll_kset.kobj,
730                                         &sbi->ll_dt_obd->obd_kset.kobj,
731                                         sbi->ll_dt_obd->obd_type->typ_name);
732                 if (err < 0) {
733                         CERROR("%s: could not register %s in llite: rc = %d\n",
734                                sbi->ll_fsname, dt, err);
735                         err = 0;
736                 }
737         }
738
739         if (sbi->ll_md_obd) {
740                 err = sysfs_create_link(&sbi->ll_kset.kobj,
741                                         &sbi->ll_md_obd->obd_kset.kobj,
742                                         sbi->ll_md_obd->obd_type->typ_name);
743                 if (err < 0) {
744                         CERROR("%s: could not register %s in llite: rc = %d\n",
745                                sbi->ll_fsname, md, err);
746                         err = 0;
747                 }
748         }
749
750         RETURN(err);
751 out_root:
752         iput(root);
753 out_lock_cn_cb:
754         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
755 out_dt:
756         obd_disconnect(sbi->ll_dt_exp);
757         sbi->ll_dt_exp = NULL;
758         sbi->ll_dt_obd = NULL;
759 out_md_fid:
760         obd_fid_fini(sbi->ll_md_exp->exp_obd);
761 out_md:
762         obd_disconnect(sbi->ll_md_exp);
763         sbi->ll_md_exp = NULL;
764         sbi->ll_md_obd = NULL;
765 out:
766         if (data != NULL)
767                 OBD_FREE_PTR(data);
768         if (osfs != NULL)
769                 OBD_FREE_PTR(osfs);
770         return err;
771 }
772
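/**
 * Get the maximum EA size known by the client.
 *
 * Queries KEY_MAX_EASIZE on the data export (maximum LOV EA size) and
 * then on the metadata export (maximum LMV EA size); the latter value is
 * returned in *lmmsize.
 *
 * \param[in]  sbi      superblock info for this filesystem
 * \param[out] lmmsize  pointer to storage location for value
 *
 * \retval 0            on success
 * \retval negative     negated errno on failure
 */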
773 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
774 {
775         int size, rc;
776
777         size = sizeof(*lmmsize);
778         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
779                           KEY_MAX_EASIZE, &size, lmmsize);
780         if (rc != 0) {
781                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
782                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
783                 RETURN(rc);
784         }
785
786         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
787
788         size = sizeof(int);
789         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
790                           KEY_MAX_EASIZE, &size, lmmsize);
791         if (rc)
792                 CERROR("Get max mdsize error rc %d\n", rc);
793
794         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
795
796         RETURN(rc);
797 }
798
799 /**
800  * Get the value of the default_easize parameter.
801  *
802  * \see client_obd::cl_default_mds_easize
803  *
804  * \param[in] sbi       superblock info for this filesystem
805  * \param[out] lmmsize  pointer to storage location for value
806  *
807  * \retval 0            on success
808  * \retval negative     negated errno on failure
809  */
810 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
811 {
812         int size, rc;
813
814         size = sizeof(int);
815         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
816                          KEY_DEFAULT_EASIZE, &size, lmmsize);
817         if (rc)
818                 CERROR("Get default mdsize error rc %d\n", rc);
819
820         RETURN(rc);
821 }
822
823 /**
824  * Set the default_easize parameter to the given value.
825  *
826  * \see client_obd::cl_default_mds_easize
827  *
828  * \param[in] sbi       superblock info for this filesystem
829  * \param[in] lmmsize   the size to set
830  *
831  * \retval 0            on success
832  * \retval negative     negated errno on failure
833  */
834 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
835 {
836         int rc;
837
838         if (lmmsize < sizeof(struct lov_mds_md) ||
839             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
840                 return -EINVAL;
841
842         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
843                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
844                                 sizeof(int), &lmmsize, NULL);
845
846         RETURN(rc);
847 }
848
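/*
 * Undo the connections made in client_common_fill_super(): finish the
 * cl_object state for this superblock, tear down the FID clients,
 * unregister the debugfs entries and disconnect the data and metadata
 * exports.
 */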
849 static void client_common_put_super(struct super_block *sb)
850 {
851         struct ll_sb_info *sbi = ll_s2sbi(sb);
852         ENTRY;
853
854         cl_sb_fini(sb);
855
856         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
857         obd_disconnect(sbi->ll_dt_exp);
858         sbi->ll_dt_exp = NULL;
859
860         ll_debugfs_unregister_super(sb);
861
862         obd_fid_fini(sbi->ll_md_exp->exp_obd);
863         obd_disconnect(sbi->ll_md_exp);
864         sbi->ll_md_exp = NULL;
865
866         EXIT;
867 }
868
869 void ll_kill_super(struct super_block *sb)
870 {
871         struct ll_sb_info *sbi;
872         ENTRY;
873
874         /* sb not initialized? */
875         if (!(sb->s_flags & SB_ACTIVE))
876                 return;
877
878         sbi = ll_s2sbi(sb);
879         /* We need to restore s_dev from the value changed for clustered NFS
880          * before put_super, because newer kernels cache s_dev and changing
881          * sb->s_dev in put_super does not affect the real device removal */
882         if (sbi) {
883                 sb->s_dev = sbi->ll_sdev_orig;
884
885                 /* wait for running statahead threads to quit */
886                 while (atomic_read(&sbi->ll_sa_running) > 0)
887                         schedule_timeout_uninterruptible(
888                                 cfs_time_seconds(1) >> 3);
889         }
890
891         EXIT;
892 }
893
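/* Return fl if the mount option string in data starts with opt, 0 otherwise. */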
894 static inline int ll_set_opt(const char *opt, char *data, int fl)
895 {
896         if (strncmp(opt, data, strlen(opt)) != 0)
897                 return 0;
898         else
899                 return fl;
900 }
901
902 /* non-client-specific mount options are parsed in lmd_parse */
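/* e.g. a client mount option string may look like "flock,user_xattr,lazystatfs" */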
903 static int ll_options(char *options, struct ll_sb_info *sbi)
904 {
905         int tmp;
906         char *s1 = options, *s2;
907         int *flags = &sbi->ll_flags;
908         ENTRY;
909
910         if (!options)
911                 RETURN(0);
912
913         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
914
915         while (*s1) {
916                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
917                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
918                 if (tmp) {
919                         *flags |= tmp;
920                         goto next;
921                 }
922                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
923                 if (tmp) {
924                         *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp;
925                         goto next;
926                 }
927                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
928                 if (tmp) {
929                         *flags = (*flags & ~LL_SBI_FLOCK) | tmp;
930                         goto next;
931                 }
932                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
933                 if (tmp) {
934                         *flags &= ~tmp;
935                         goto next;
936                 }
937                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
938                 if (tmp) {
939                         *flags |= tmp;
940                         goto next;
941                 }
942                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
943                 if (tmp) {
944                         *flags &= ~tmp;
945                         goto next;
946                 }
947                 tmp = ll_set_opt("context", s1, 1);
948                 if (tmp)
949                         goto next;
950                 tmp = ll_set_opt("fscontext", s1, 1);
951                 if (tmp)
952                         goto next;
953                 tmp = ll_set_opt("defcontext", s1, 1);
954                 if (tmp)
955                         goto next;
956                 tmp = ll_set_opt("rootcontext", s1, 1);
957                 if (tmp)
958                         goto next;
959                 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
960                 if (tmp) {
961                         *flags |= tmp;
962                         goto next;
963                 }
964                 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
965                 if (tmp) {
966                         *flags &= ~tmp;
967                         goto next;
968                 }
969
970                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
971                 if (tmp) {
972                         *flags |= tmp;
973                         sbi->ll_checksum_set = 1;
974                         goto next;
975                 }
976                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
977                 if (tmp) {
978                         *flags &= ~tmp;
979                         sbi->ll_checksum_set = 1;
980                         goto next;
981                 }
982                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
983                 if (tmp) {
984                         *flags |= tmp;
985                         goto next;
986                 }
987                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
988                 if (tmp) {
989                         *flags &= ~tmp;
990                         goto next;
991                 }
992                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
993                 if (tmp) {
994                         *flags |= tmp;
995                         goto next;
996                 }
997                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
998                 if (tmp) {
999                         *flags &= ~tmp;
1000                         goto next;
1001                 }
1002                 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
1003                 if (tmp) {
1004                         *flags |= tmp;
1005                         goto next;
1006                 }
1007                 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
1008                 if (tmp) {
1009                         *flags |= tmp;
1010                         goto next;
1011                 }
1012                 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
1013                 if (tmp) {
1014                         *flags &= ~tmp;
1015                         goto next;
1016                 }
1017                 tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
1018                 if (tmp) {
1019                         *flags |= tmp;
1020                         goto next;
1021                 }
1022                 tmp = ll_set_opt("test_dummy_encryption", s1,
1023                                  LL_SBI_TEST_DUMMY_ENCRYPTION);
1024                 if (tmp) {
1025 #ifdef HAVE_LUSTRE_CRYPTO
1026                         *flags |= tmp;
1027 #else
1028                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1029 #endif
1030                         goto next;
1031                 }
1032                 tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT);
1033                 if (tmp) {
1034 #ifdef HAVE_LUSTRE_CRYPTO
1035                         *flags &= ~tmp;
1036 #else
1037                         LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n");
1038 #endif
1039                         goto next;
1040                 }
1041                 tmp = ll_set_opt("foreign_symlink", s1, LL_SBI_FOREIGN_SYMLINK);
1042                 if (tmp) {
1043                         int prefix_pos = sizeof("foreign_symlink=") - 1;
1044                         int equal_pos = sizeof("foreign_symlink=") - 2;
1045
1046                         /* non-default prefix provided ? */
1047                         if (strlen(s1) >= sizeof("foreign_symlink=") &&
1048                             *(s1 + equal_pos) == '=') {
1049                                 char *old = sbi->ll_foreign_symlink_prefix;
1050                                 size_t old_len =
1051                                         sbi->ll_foreign_symlink_prefix_size;
1052
1053                                 /* path must be absolute */
1054                                 if (*(s1 + sizeof("foreign_symlink=")
1055                                       - 1) != '/') {
1056                                         LCONSOLE_ERROR_MSG(0x152,
1057                                                 "foreign prefix '%s' must be an absolute path\n",
1058                                                 s1 + prefix_pos);
1059                                         RETURN(-EINVAL);
1060                                 }
1061                                 /* last option ? */
1062                                 s2 = strchrnul(s1 + prefix_pos, ',');
1063
1064                                 if (sbi->ll_foreign_symlink_prefix) {
1065                                         sbi->ll_foreign_symlink_prefix = NULL;
1066                                         sbi->ll_foreign_symlink_prefix_size = 0;
1067                                 }
1068                                 /* alloc for path length and '\0' */
1069                                 OBD_ALLOC(sbi->ll_foreign_symlink_prefix,
1070                                                 s2 - (s1 + prefix_pos) + 1);
1071                                 if (!sbi->ll_foreign_symlink_prefix) {
1072                                         /* restore previous */
1073                                         sbi->ll_foreign_symlink_prefix = old;
1074                                         sbi->ll_foreign_symlink_prefix_size =
1075                                                 old_len;
1076                                         RETURN(-ENOMEM);
1077                                 }
1078                                 if (old)
1079                                         OBD_FREE(old, old_len);
1080                                 strncpy(sbi->ll_foreign_symlink_prefix,
1081                                         s1 + prefix_pos,
1082                                         s2 - (s1 + prefix_pos));
1083                                 sbi->ll_foreign_symlink_prefix_size =
1084                                         s2 - (s1 + prefix_pos) + 1;
1085                         } else {
1086                                 LCONSOLE_ERROR_MSG(0x152,
1087                                                    "invalid %s option\n", s1);
1088                         }
1089                         /* enable foreign symlink support */
1090                         *flags |= tmp;
1091                         goto next;
1092                 }
1093                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
1094                                    s1);
1095                 RETURN(-EINVAL);
1096
1097 next:
1098                 /* Find next opt */
1099                 s2 = strchr(s1, ',');
1100                 if (s2 == NULL)
1101                         break;
1102                 s1 = s2 + 1;
1103         }
1104         RETURN(0);
1105 }
1106
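/*
 * Initialize the llite-private part of a newly allocated inode;
 * directories get the statahead fields, other inodes get the I/O,
 * truncate, file heat and PCC fields.
 */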
1107 void ll_lli_init(struct ll_inode_info *lli)
1108 {
1109         lli->lli_inode_magic = LLI_INODE_MAGIC;
1110         lli->lli_flags = 0;
1111         spin_lock_init(&lli->lli_lock);
1112         lli->lli_posix_acl = NULL;
1113         /* Do not set lli_fid, it has been initialized already. */
1114         fid_zero(&lli->lli_pfid);
1115         lli->lli_mds_read_och = NULL;
1116         lli->lli_mds_write_och = NULL;
1117         lli->lli_mds_exec_och = NULL;
1118         lli->lli_open_fd_read_count = 0;
1119         lli->lli_open_fd_write_count = 0;
1120         lli->lli_open_fd_exec_count = 0;
1121         mutex_init(&lli->lli_och_mutex);
1122         spin_lock_init(&lli->lli_agl_lock);
1123         spin_lock_init(&lli->lli_layout_lock);
1124         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1125         lli->lli_clob = NULL;
1126
1127         init_rwsem(&lli->lli_xattrs_list_rwsem);
1128         mutex_init(&lli->lli_xattrs_enq_lock);
1129
1130         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1131         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1132                 lli->lli_opendir_key = NULL;
1133                 lli->lli_sai = NULL;
1134                 spin_lock_init(&lli->lli_sa_lock);
1135                 lli->lli_opendir_pid = 0;
1136                 lli->lli_sa_enabled = 0;
1137                 init_rwsem(&lli->lli_lsm_sem);
1138         } else {
1139                 mutex_init(&lli->lli_size_mutex);
1140                 mutex_init(&lli->lli_setattr_mutex);
1141                 lli->lli_symlink_name = NULL;
1142                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1143                 range_lock_tree_init(&lli->lli_write_tree);
1144                 init_rwsem(&lli->lli_glimpse_sem);
1145                 lli->lli_glimpse_time = ktime_set(0, 0);
1146                 INIT_LIST_HEAD(&lli->lli_agl_list);
1147                 lli->lli_agl_index = 0;
1148                 lli->lli_async_rc = 0;
1149                 spin_lock_init(&lli->lli_heat_lock);
1150                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1151                 lli->lli_heat_flags = 0;
1152                 mutex_init(&lli->lli_pcc_lock);
1153                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1154                 lli->lli_pcc_inode = NULL;
1155                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1156                 lli->lli_pcc_generation = 0;
1157                 mutex_init(&lli->lli_group_mutex);
1158                 lli->lli_group_users = 0;
1159                 lli->lli_group_gid = 0;
1160         }
1161         mutex_init(&lli->lli_layout_mutex);
1162         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1163 }
1164
1165 #define MAX_STRING_SIZE 128
1166
1167 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1168
1169 #define LSI_BDI_INITIALIZED     0x00400000
1170
1171 #ifndef HAVE_BDI_CAP_MAP_COPY
1172 # define BDI_CAP_MAP_COPY       0
1173 #endif
1174
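/*
 * Fallback for kernels without super_setup_bdi_name(): initialize and
 * register a per-superblock backing_dev_info by hand.
 */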
1175 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1176 {
1177         struct  lustre_sb_info *lsi = s2lsi(sb);
1178         char buf[MAX_STRING_SIZE];
1179         va_list args;
1180         int err;
1181
1182         err = bdi_init(&lsi->lsi_bdi);
1183         if (err)
1184                 return err;
1185
1186         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1187         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1188         lsi->lsi_bdi.name = "lustre";
1189         va_start(args, fmt);
1190         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1191         va_end(args);
1192         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1194         if (!err)
1195                 sb->s_bdi = &lsi->lsi_bdi;
1196
1197         return err;
1198 }
1199 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1200
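/*
 * Client mount entry point: parse client mount options, set up the BDI
 * and debugfs entries, process the client configuration log from the MGS
 * and call client_common_fill_super() to connect to the servers.
 */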
1201 int ll_fill_super(struct super_block *sb)
1202 {
1203         struct  lustre_profile *lprof = NULL;
1204         struct  lustre_sb_info *lsi = s2lsi(sb);
1205         struct  ll_sb_info *sbi = NULL;
1206         char    *dt = NULL, *md = NULL;
1207         char    *profilenm = get_profile_name(sb);
1208         struct config_llog_instance *cfg;
1209         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1210         const int instlen = LUSTRE_MAXINSTANCE + 2;
1211         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1212         char name[MAX_STRING_SIZE];
1213         int md_len = 0;
1214         int dt_len = 0;
1215         uuid_t uuid;
1216         char *ptr;
1217         int len;
1218         int err;
1219
1220         ENTRY;
1221         /* for ASLR, to map between cfg_instance and hashed ptr */
1222         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1223                profilenm, cfg_instance, sb);
1224
1225         OBD_ALLOC_PTR(cfg);
1226         if (cfg == NULL)
1227                 GOTO(out_free_cfg, err = -ENOMEM);
1228
1229         /* client additional sb info */
1230         lsi->lsi_llsbi = sbi = ll_init_sbi();
1231         if (IS_ERR(sbi))
1232                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1233
1234         err = ll_options(lsi->lsi_lmd->lmd_opts, sbi);
1235         if (err)
1236                 GOTO(out_free_cfg, err);
1237
1238         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1239         sb->s_d_op = &ll_d_ops;
1240
1241         /* UUID handling */
1242         generate_random_uuid(uuid.b);
1243         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1244
1245         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1246
1247         /* Get fsname */
1248         len = strlen(profilenm);
1249         ptr = strrchr(profilenm, '-');
1250         if (ptr && (strcmp(ptr, "-client") == 0))
1251                 len -= 7;
1252
1253         if (len > LUSTRE_MAXFSNAME) {
1254                 if (unlikely(len >= MAX_STRING_SIZE))
1255                         len = MAX_STRING_SIZE - 1;
1256                 strncpy(name, profilenm, len);
1257                 name[len] = '\0';
1258                 err = -ENAMETOOLONG;
1259                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1260                        name, LUSTRE_MAXFSNAME, err);
1261                 GOTO(out_free_cfg, err);
1262         }
1263         strncpy(sbi->ll_fsname, profilenm, len);
1264         sbi->ll_fsname[len] = '\0';
1265
1266         /* Mount info */
1267         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1268                  profilenm, cfg_instance);
1269
1270         err = super_setup_bdi_name(sb, "%s", name);
1271         if (err)
1272                 GOTO(out_free_cfg, err);
1273
1274         /* Call ll_debugfs_register_super() before lustre_process_log()
1275          * so that "llite.*.*" params can be processed correctly.
1276          */
1277         err = ll_debugfs_register_super(sb, name);
1278         if (err < 0) {
1279                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1280                        sbi->ll_fsname, err);
1281                 err = 0;
1282         }
1283
1284         /* The cfg_instance is a value unique to this super, in case some
1285          * joker tries to mount the same fs at two mount points.
1286          */
1287         cfg->cfg_instance = cfg_instance;
1288         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1289         cfg->cfg_callback = class_config_llog_handler;
1290         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1291         /* set up client obds */
1292         err = lustre_process_log(sb, profilenm, cfg);
1293         if (err < 0)
1294                 GOTO(out_debugfs, err);
1295
1296         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1297         lprof = class_get_profile(profilenm);
1298         if (lprof == NULL) {
1299                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1300                                    " read from the MGS.  Does that filesystem "
1301                                    "exist?\n", profilenm);
1302                 GOTO(out_debugfs, err = -EINVAL);
1303         }
1304         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1305                lprof->lp_md, lprof->lp_dt);
1306
1307         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1308         OBD_ALLOC(dt, dt_len);
1309         if (!dt)
1310                 GOTO(out_profile, err = -ENOMEM);
1311         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1312
1313         md_len = strlen(lprof->lp_md) + instlen + 2;
1314         OBD_ALLOC(md, md_len);
1315         if (!md)
1316                 GOTO(out_free_dt, err = -ENOMEM);
1317         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1318
1319         /* connections, registrations, sb setup */
1320         err = client_common_fill_super(sb, md, dt);
1321         if (err < 0)
1322                 GOTO(out_free_md, err);
1323
1324         sbi->ll_client_common_fill_super_succeeded = 1;
1325
1326 out_free_md:
1327         if (md)
1328                 OBD_FREE(md, md_len);
1329 out_free_dt:
1330         if (dt)
1331                 OBD_FREE(dt, dt_len);
1332 out_profile:
1333         if (lprof)
1334                 class_put_profile(lprof);
1335 out_debugfs:
1336         if (err < 0)
1337                 ll_debugfs_unregister_super(sb);
1338 out_free_cfg:
1339         if (cfg)
1340                 OBD_FREE_PTR(cfg);
1341
1342         if (err)
1343                 ll_put_super(sb);
1344         else if (sbi->ll_flags & LL_SBI_VERBOSE)
1345                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1346         RETURN(err);
1347 } /* ll_fill_super */
1348
1349 void ll_put_super(struct super_block *sb)
1350 {
1351         struct config_llog_instance cfg, params_cfg;
1352         struct obd_device *obd;
1353         struct lustre_sb_info *lsi = s2lsi(sb);
1354         struct ll_sb_info *sbi = ll_s2sbi(sb);
1355         char *profilenm = get_profile_name(sb);
1356         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1357         long ccc_count;
1358         int next, force = 1, rc = 0;
1359         ENTRY;
1360
1361         if (IS_ERR(sbi))
1362                 GOTO(out_no_sbi, 0);
1363
1364         /* Should replace instance_id with something better for ASLR */
1365         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1366                profilenm, cfg_instance, sb);
1367
1368         cfg.cfg_instance = cfg_instance;
1369         lustre_end_log(sb, profilenm, &cfg);
1370
1371         params_cfg.cfg_instance = cfg_instance;
1372         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1373
1374         if (sbi->ll_md_exp) {
1375                 obd = class_exp2obd(sbi->ll_md_exp);
1376                 if (obd)
1377                         force = obd->obd_force;
1378         }
1379
1380         /* Wait for unstable pages to be committed to stable storage */
1381         if (force == 0) {
1382                 rc = l_wait_event_abortable(
1383                         sbi->ll_cache->ccc_unstable_waitq,
1384                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1385         }
1386
1387         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1388         if (force == 0 && rc != -ERESTARTSYS)
1389                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1390
1391         /* We need to set force before the lov_disconnect in
1392          * lustre_common_put_super, since lov_disconnect cleans up the OSCs as well.
1393          */
1394         if (force) {
1395                 next = 0;
1396                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1397                                                      &next)) != NULL) {
1398                         obd->obd_force = force;
1399                 }
1400         }
1401
1402         if (sbi->ll_client_common_fill_super_succeeded) {
1403                 /* Only if client_common_fill_super succeeded */
1404                 client_common_put_super(sb);
1405         }
1406
1407         next = 0;
1408         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1409                 class_manual_cleanup(obd);
1410
1411         if (sbi->ll_flags & LL_SBI_VERBOSE)
1412                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1413
1414         if (profilenm)
1415                 class_del_profile(profilenm);
1416
1417 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1418         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1419                 bdi_destroy(&lsi->lsi_bdi);
1420                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1421         }
1422 #endif
1423
1424         ll_free_sbi(sb);
1425         lsi->lsi_llsbi = NULL;
1426 out_no_sbi:
1427         lustre_common_put_super(sb);
1428
1429         cl_env_cache_purge(~0);
1430
1431         module_put(THIS_MODULE);
1432
1433         EXIT;
1434 } /* ll_put_super */
1435
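/* Return the inode cached in the DLM resource LVB of @lock, holding an extra
 * reference from igrab().  Return NULL if no inode is attached, or if the
 * cached pointer looks stale (bad lli_inode_magic).
 */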
1436 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1437 {
1438         struct inode *inode = NULL;
1439
1440         /* NOTE: we depend on atomic igrab() -bzzz */
1441         lock_res_and_lock(lock);
1442         if (lock->l_resource->lr_lvb_inode) {
1443                 struct ll_inode_info *lli;
1444                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1445                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1446                         inode = igrab(lock->l_resource->lr_lvb_inode);
1447                 } else {
1448                         inode = lock->l_resource->lr_lvb_inode;
1449                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?
1450                                          D_INFO : D_WARNING, lock,
1451                                          "lr_lvb_inode %p is bogus: magic %08x",
1452                                          lock->l_resource->lr_lvb_inode,
1453                                          lli->lli_inode_magic);
1454                         inode = NULL;
1455                 }
1456         }
1457         unlock_res_and_lock(lock);
1458         return inode;
1459 }
1460
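/* Free the striping (lsm_md) and the default striping metadata cached on a
 * directory inode, if any.
 */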
1461 void ll_dir_clear_lsm_md(struct inode *inode)
1462 {
1463         struct ll_inode_info *lli = ll_i2info(inode);
1464
1465         LASSERT(S_ISDIR(inode->i_mode));
1466
1467         if (lli->lli_lsm_md) {
1468                 lmv_free_memmd(lli->lli_lsm_md);
1469                 lli->lli_lsm_md = NULL;
1470         }
1471
1472         if (lli->lli_default_lsm_md) {
1473                 lmv_free_memmd(lli->lli_default_lsm_md);
1474                 lli->lli_default_lsm_md = NULL;
1475         }
1476 }
1477
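/* Get or create the inode for one slave stripe of a striped directory,
 * keyed by the stripe FID.  A new inode is set up as a directory and
 * remembers the master object FID in lli_pfid before being unlocked.
 */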
1478 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1479                                       const struct lu_fid *fid,
1480                                       struct lustre_md *md)
1481 {
1482         struct ll_sb_info       *sbi = ll_s2sbi(sb);
1483         struct mdt_body         *body = md->body;
1484         struct inode            *inode;
1485         ino_t                   ino;
1486         ENTRY;
1487
1488         ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1489         inode = iget_locked(sb, ino);
1490         if (inode == NULL) {
1491                 CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
1492                        sbi->ll_fsname, PFID(fid));
1493                 RETURN(ERR_PTR(-ENOENT));
1494         }
1495
1496         if (inode->i_state & I_NEW) {
1497                 struct ll_inode_info *lli = ll_i2info(inode);
1498                 struct lmv_stripe_md *lsm = md->lmv;
1499
1500                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1501                                 (body->mbo_mode & S_IFMT);
1502                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1503                          PFID(fid));
1504
1505                 inode->i_mtime.tv_sec = 0;
1506                 inode->i_atime.tv_sec = 0;
1507                 inode->i_ctime.tv_sec = 0;
1508                 inode->i_rdev = 0;
1509
1510 #ifdef HAVE_BACKING_DEV_INFO
1511                 /* initializing backing dev info. */
1512                 inode->i_mapping->backing_dev_info =
1513                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1514 #endif
1515                 inode->i_op = &ll_dir_inode_operations;
1516                 inode->i_fop = &ll_dir_operations;
1517                 lli->lli_fid = *fid;
1518                 ll_lli_init(lli);
1519
1520                 LASSERT(lsm != NULL);
1521                 /* master object FID */
1522                 lli->lli_pfid = body->mbo_fid1;
1523                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1524                        lli, PFID(fid), PFID(&lli->lli_pfid));
1525                 unlock_new_inode(inode);
1526         }
1527
1528         RETURN(inode);
1529 }
1530
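/* Instantiate a new directory striping on @inode: create the slave inode for
 * each valid stripe FID via ll_iget_anon_dir(), dropping them again on
 * failure, then publish the lsm in lli_lsm_md.  Called with lli_lsm_sem held
 * for write.
 */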
1531 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1532 {
1533         struct lu_fid *fid;
1534         struct lmv_stripe_md *lsm = md->lmv;
1535         struct ll_inode_info *lli = ll_i2info(inode);
1536         int i;
1537
1538         LASSERT(lsm != NULL);
1539
1540         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1541                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1542         lsm_md_dump(D_INODE, lsm);
1543
1544         if (!lmv_dir_striped(lsm))
1545                 goto out;
1546
1547         /* XXX sigh, this lsm_root initialization should live in the
1548          * LMV layer, but it needs ll_iget, so it stays here
1549          * for now. */
1550         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1551                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1552                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1553
1554                 if (!fid_is_sane(fid))
1555                         continue;
1556
1557                 /* Unfortunately ll_iget will call ll_update_inode,
1558                  * where the initialization of a slave inode is slightly
1559                  * different, so it resets lsm_md to NULL to avoid
1560                  * initializing the lsm for a slave inode. */
1561                 lsm->lsm_md_oinfo[i].lmo_root =
1562                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1563                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1564                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1565
1566                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1567                         while (i-- > 0) {
1568                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1569                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1570                         }
1571                         return rc;
1572                 }
1573         }
1574 out:
1575         lli->lli_lsm_md = lsm;
1576
1577         return 0;
1578 }
1579
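/* Refresh the cached default striping of @inode from @md: clear it, replace
 * it if it changed, or install it for the first time.  When the new default
 * is kept, ownership moves to the inode and md->default_lmv is set to NULL.
 */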
1580 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1581 {
1582         struct ll_inode_info *lli = ll_i2info(inode);
1583
1584         if (!md->default_lmv) {
1585                 /* clear default lsm */
1586                 if (lli->lli_default_lsm_md) {
1587                         down_write(&lli->lli_lsm_sem);
1588                         if (lli->lli_default_lsm_md) {
1589                                 lmv_free_memmd(lli->lli_default_lsm_md);
1590                                 lli->lli_default_lsm_md = NULL;
1591                         }
1592                         up_write(&lli->lli_lsm_sem);
1593                 }
1594         } else if (lli->lli_default_lsm_md) {
1595                 /* update default lsm if it changes */
1596                 down_read(&lli->lli_lsm_sem);
1597                 if (lli->lli_default_lsm_md &&
1598                     !lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1599                         up_read(&lli->lli_lsm_sem);
1600                         down_write(&lli->lli_lsm_sem);
1601                         if (lli->lli_default_lsm_md)
1602                                 lmv_free_memmd(lli->lli_default_lsm_md);
1603                         lli->lli_default_lsm_md = md->default_lmv;
1604                         lsm_md_dump(D_INODE, md->default_lmv);
1605                         md->default_lmv = NULL;
1606                         up_write(&lli->lli_lsm_sem);
1607                 } else {
1608                         up_read(&lli->lli_lsm_sem);
1609                 }
1610         } else {
1611                 /* init default lsm */
1612                 down_write(&lli->lli_lsm_sem);
1613                 lli->lli_default_lsm_md = md->default_lmv;
1614                 lsm_md_dump(D_INODE, md->default_lmv);
1615                 md->default_lmv = NULL;
1616                 up_write(&lli->lli_lsm_sem);
1617         }
1618 }
1619
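/* Update the directory striping of @inode from an MD reply.  The common case
 * of an unchanged layout is handled under a read lock; a changed layout is
 * re-initialized under a write lock, after which the merged stripe attributes
 * (nlink, size, times) are folded back into the reply body.
 */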
1620 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1621 {
1622         struct ll_inode_info *lli = ll_i2info(inode);
1623         struct lmv_stripe_md *lsm = md->lmv;
1624         struct cl_attr  *attr;
1625         int rc = 0;
1626
1627         ENTRY;
1628
1629         LASSERT(S_ISDIR(inode->i_mode));
1630         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1631                PFID(ll_inode2fid(inode)));
1632
1633         /* update default LMV */
1634         if (md->default_lmv)
1635                 ll_update_default_lsm_md(inode, md);
1636
1637         /*
1638          * no striping information from the request; the lustre_md from the req
1639          * does not include the stripe EA, see ll_md_setattr()
1640          */
1641         if (!lsm)
1642                 RETURN(0);
1643
1644         /*
1645          * normally the dir layout doesn't change, so only take the read lock
1646          * to check it, which avoids blocking other MD operations.
1647          */
1648         down_read(&lli->lli_lsm_sem);
1649
1650         /* some earlier lookup already initialized the lsm and it is unchanged */
1651         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1652                 GOTO(unlock, rc = 0);
1653
1654         /* if the dir layout doesn't match, check whether the version has
1655          * increased, which means the layout has changed; this happens in dir
1656          * split/merge and lfsck.
1657          *
1658          * foreign LMV should not change.
1659          */
1660         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1661             lsm->lsm_md_layout_version <=
1662             lli->lli_lsm_md->lsm_md_layout_version) {
1663                 CERROR("%s: "DFID" dir layout mismatch:\n",
1664                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1665                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1666                 lsm_md_dump(D_ERROR, lsm);
1667                 GOTO(unlock, rc = -EINVAL);
1668         }
1669
1670         up_read(&lli->lli_lsm_sem);
1671         down_write(&lli->lli_lsm_sem);
1672         /* clear existing lsm */
1673         if (lli->lli_lsm_md) {
1674                 lmv_free_memmd(lli->lli_lsm_md);
1675                 lli->lli_lsm_md = NULL;
1676         }
1677
1678         rc = ll_init_lsm_md(inode, md);
1679         up_write(&lli->lli_lsm_sem);
1680
1681         if (rc)
1682                 RETURN(rc);
1683
1684         /* set md->lmv to NULL, so that the subsequent lustre_md free will not
1685          * free this lsm.
1686          */
1687         md->lmv = NULL;
1688
1689         /* md_merge_attr() may take a long time; since the lsm is already set,
1690          * switch to a read lock.
1691          */
1692         down_read(&lli->lli_lsm_sem);
1693
1694         if (!lmv_dir_striped(lli->lli_lsm_md))
1695                 GOTO(unlock, rc = 0);
1696
1697         OBD_ALLOC_PTR(attr);
1698         if (!attr)
1699                 GOTO(unlock, rc = -ENOMEM);
1700
1701         /* validate the lsm */
1702         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1703                            ll_md_blocking_ast);
1704         if (!rc) {
1705                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1706                         md->body->mbo_nlink = attr->cat_nlink;
1707                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1708                         md->body->mbo_size = attr->cat_size;
1709                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1710                         md->body->mbo_atime = attr->cat_atime;
1711                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1712                         md->body->mbo_ctime = attr->cat_ctime;
1713                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1714                         md->body->mbo_mtime = attr->cat_mtime;
1715         }
1716
1717         OBD_FREE_PTR(attr);
1718         GOTO(unlock, rc);
1719 unlock:
1720         up_read(&lli->lli_lsm_sem);
1721
1722         return rc;
1723 }
1724
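/* Per-inode cleanup on eviction: close cached MDS open handles, free the
 * symlink name, ACLs and xattr cache, drop any directory striping and finish
 * the cl_object before the inode goes away.
 */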
1725 void ll_clear_inode(struct inode *inode)
1726 {
1727         struct ll_inode_info *lli = ll_i2info(inode);
1728         struct ll_sb_info *sbi = ll_i2sbi(inode);
1729
1730         ENTRY;
1731
1732         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1733                PFID(ll_inode2fid(inode)), inode);
1734
1735         if (S_ISDIR(inode->i_mode)) {
1736                 /* these should have been cleared in ll_file_release */
1737                 LASSERT(lli->lli_opendir_key == NULL);
1738                 LASSERT(lli->lli_sai == NULL);
1739                 LASSERT(lli->lli_opendir_pid == 0);
1740         } else {
1741                 pcc_inode_free(inode);
1742         }
1743
1744         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1745
1746         LASSERT(!lli->lli_open_fd_write_count);
1747         LASSERT(!lli->lli_open_fd_read_count);
1748         LASSERT(!lli->lli_open_fd_exec_count);
1749
1750         if (lli->lli_mds_write_och)
1751                 ll_md_real_close(inode, FMODE_WRITE);
1752         if (lli->lli_mds_exec_och)
1753                 ll_md_real_close(inode, FMODE_EXEC);
1754         if (lli->lli_mds_read_och)
1755                 ll_md_real_close(inode, FMODE_READ);
1756
1757         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1758                 OBD_FREE(lli->lli_symlink_name,
1759                          strlen(lli->lli_symlink_name) + 1);
1760                 lli->lli_symlink_name = NULL;
1761         }
1762
1763         ll_xattr_cache_destroy(inode);
1764
1765         forget_all_cached_acls(inode);
1766         lli_clear_acl(lli);
1767         lli->lli_inode_magic = LLI_INODE_DEAD;
1768
1769         if (S_ISDIR(inode->i_mode))
1770                 ll_dir_clear_lsm_md(inode);
1771         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1772                 LASSERT(list_empty(&lli->lli_agl_list));
1773
1774         /*
1775          * XXX This has to be done before lsm is freed below, because
1776          * cl_object still uses inode lsm.
1777          */
1778         cl_inode_fini(inode);
1779
1780         llcrypt_put_encryption_info(inode);
1781
1782         EXIT;
1783 }
1784
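/* Send the setattr RPC to the MDS and fold the reply attributes back into the
 * local inode.  A -ENOENT reply for a special file (not a regular file or
 * directory) is tolerated by applying the change locally only.
 */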
1785 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1786 {
1787         struct lustre_md md;
1788         struct inode *inode = dentry->d_inode;
1789         struct ll_sb_info *sbi = ll_i2sbi(inode);
1790         struct ptlrpc_request *request = NULL;
1791         int rc, ia_valid;
1792         ENTRY;
1793
1794         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1795                                      LUSTRE_OPC_ANY, NULL);
1796         if (IS_ERR(op_data))
1797                 RETURN(PTR_ERR(op_data));
1798
1799         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1800         if (rc) {
1801                 ptlrpc_req_finished(request);
1802                 if (rc == -ENOENT) {
1803                         clear_nlink(inode);
1804                         /* Unlinked special device node? Or just a race?
1805                          * Pretend we have done everything. */
1806                         if (!S_ISREG(inode->i_mode) &&
1807                             !S_ISDIR(inode->i_mode)) {
1808                                 ia_valid = op_data->op_attr.ia_valid;
1809                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1810                                 rc = simple_setattr(dentry, &op_data->op_attr);
1811                                 op_data->op_attr.ia_valid = ia_valid;
1812                         }
1813                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1814                         CERROR("md_setattr fails: rc = %d\n", rc);
1815                 }
1816                 RETURN(rc);
1817         }
1818
1819         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1820                               sbi->ll_md_exp, &md);
1821         if (rc) {
1822                 ptlrpc_req_finished(request);
1823                 RETURN(rc);
1824         }
1825
1826         ia_valid = op_data->op_attr.ia_valid;
1827         /* The inode size will be set in ll_setattr_ost; we can't do it now
1828          * since the dirty cache is not cleared yet. */
1829         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1830         if (S_ISREG(inode->i_mode))
1831                 inode_lock(inode);
1832         rc = simple_setattr(dentry, &op_data->op_attr);
1833         if (S_ISREG(inode->i_mode))
1834                 inode_unlock(inode);
1835         op_data->op_attr.ia_valid = ia_valid;
1836
1837         rc = ll_update_inode(inode, &md);
1838         ptlrpc_req_finished(request);
1839
1840         RETURN(rc);
1841 }
1842
1843 /**
1844  * Zero a portion of a page belonging to @inode.
1845  * This implies, if necessary:
1846  * - taking cl_lock on range corresponding to concerned page
1847  * - grabbing vm page
1848  * - associating cl_page
1849  * - proceeding to clio read
1850  * - zeroing range in page
1851  * - proceeding to cl_page flush
1852  * - releasing cl_lock
1853  *
1854  * \param[in] inode     inode
1855  * \param[in] index     page index
1856  * \param[in] offset    offset in page to start zeroing from
1857  * \param[in] len       number of bytes to zero
1858  *
1859  * \retval 0            on success
1860  * \retval negative     errno on failure
1861  */
1862 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1863                     unsigned len)
1864 {
1865         struct ll_inode_info *lli = ll_i2info(inode);
1866         struct cl_object *clob = lli->lli_clob;
1867         __u16 refcheck;
1868         struct lu_env *env = NULL;
1869         struct cl_io *io = NULL;
1870         struct cl_page *clpage = NULL;
1871         struct page *vmpage = NULL;
1872         loff_t from = (loff_t)index << PAGE_SHIFT; /* loff_t avoids 32-bit overflow */
1873         struct cl_lock *lock = NULL;
1874         struct cl_lock_descr *descr = NULL;
1875         struct cl_2queue *queue = NULL;
1876         struct cl_sync_io *anchor = NULL;
1877         bool holdinglock = false;
1878         bool lockedbymyself = true;
1879         int rc;
1880
1881         ENTRY;
1882
1883         env = cl_env_get(&refcheck);
1884         if (IS_ERR(env))
1885                 RETURN(PTR_ERR(env));
1886
1887         io = vvp_env_thread_io(env);
1888         io->ci_obj = clob;
1889         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1890         if (rc)
1891                 GOTO(putenv, rc);
1892
1893         lock = vvp_env_lock(env);
1894         descr = &lock->cll_descr;
1895         descr->cld_obj   = io->ci_obj;
1896         descr->cld_start = cl_index(io->ci_obj, from);
1897         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1898         descr->cld_mode  = CLM_WRITE;
1899         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1900
1901         /* request lock for page */
1902         rc = cl_lock_request(env, io, lock);
1903         /* -ECANCELED indicates a matching lock with a different extent
1904          * was already present, and -EEXIST indicates a matching lock
1905          * on exactly the same extent was already present.
1906          * In both cases it means we are covered.
1907          */
1908         if (rc == -ECANCELED || rc == -EEXIST)
1909                 rc = 0;
1910         else if (rc < 0)
1911                 GOTO(iofini, rc);
1912         else
1913                 holdinglock = true;
1914
1915         /* grab page */
1916         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1917         if (vmpage == NULL)
1918                 GOTO(rellock, rc = -EOPNOTSUPP);
1919
1920         if (!PageDirty(vmpage)) {
1921                 /* associate cl_page */
1922                 clpage = cl_page_find(env, clob, vmpage->index,
1923                                       vmpage, CPT_CACHEABLE);
1924                 if (IS_ERR(clpage))
1925                         GOTO(pagefini, rc = PTR_ERR(clpage));
1926
1927                 cl_page_assume(env, io, clpage);
1928         }
1929
1930         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1931             !PageWriteback(vmpage)) {
1932                 /* read page */
1933                 /* set PagePrivate2 to detect special case of empty page
1934                  * in osc_brw_fini_request()
1935                  */
1936                 SetPagePrivate2(vmpage);
1937                 rc = ll_io_read_page(env, io, clpage, NULL);
1938                 if (!PagePrivate2(vmpage))
1939                         /* PagePrivate2 was cleared in osc_brw_fini_request()
1940                          * meaning we read an empty page. In this case, in order
1941                          * to avoid allocating an unnecessary block in the truncated
1942                          * file, we must not zero and write it out as below. The
1943                          * subsequent server-side truncate will handle things correctly.
1944                          */
1945                         GOTO(clpfini, rc = 0);
1946                 ClearPagePrivate2(vmpage);
1947                 if (rc)
1948                         GOTO(clpfini, rc);
1949                 lockedbymyself = trylock_page(vmpage);
1950                 cl_page_assume(env, io, clpage);
1951         }
1952
1953         /* zero range in page */
1954         zero_user(vmpage, offset, len);
1955
1956         if (holdinglock && clpage) {
1957                 /* explicitly write newly modified page */
1958                 queue = &io->ci_queue;
1959                 cl_2queue_init(queue);
1960                 anchor = &vvp_env_info(env)->vti_anchor;
1961                 cl_sync_io_init(anchor, 1);
1962                 clpage->cp_sync_io = anchor;
1963                 cl_2queue_add(queue, clpage);
1964                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1965                 if (rc)
1966                         GOTO(queuefini1, rc);
1967                 rc = cl_sync_io_wait(env, anchor, 0);
1968                 if (rc)
1969                         GOTO(queuefini2, rc);
1970                 cl_page_assume(env, io, clpage);
1971
1972 queuefini2:
1973                 cl_2queue_discard(env, io, queue);
1974 queuefini1:
1975                 cl_2queue_disown(env, io, queue);
1976                 cl_2queue_fini(env, queue);
1977         }
1978
1979 clpfini:
1980         if (clpage)
1981                 cl_page_put(env, clpage);
1982 pagefini:
1983         if (lockedbymyself) {
1984                 unlock_page(vmpage);
1985                 put_page(vmpage);
1986         }
1987 rellock:
1988         if (holdinglock)
1989                 cl_lock_release(env, lock);
1990 iofini:
1991         cl_io_fini(env, io);
1992 putenv:
1993         if (env)
1994                 cl_env_put(env, &refcheck);
1995
1996         RETURN(rc);
1997 }
1998
1999 /* If this inode has objects allocated to it (lsm != NULL), then the OST
2000  * object(s) determine the file size and mtime.  Otherwise, the MDS will
2001  * keep these values until such a time that objects are allocated for it.
2002  * We do the MDS operations first, as it is checking permissions for us.
2003  * We don't do the MDS RPC if there is nothing that we want to store there;
2004  * otherwise there is no harm in updating mtime/atime on the MDS if we are
2005  * going to do an RPC anyway.
2006  *
2007  * If we are doing a truncate, we will send the mtime and ctime updates
2008  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2009  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2010  * at the same time.
2011  *
2012  * In the case of HSM import, we only set attributes on the MDS.
2013  */
2014 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2015                    enum op_xvalid xvalid, bool hsm_import)
2016 {
2017         struct inode *inode = dentry->d_inode;
2018         struct ll_inode_info *lli = ll_i2info(inode);
2019         struct md_op_data *op_data = NULL;
2020         ktime_t kstart = ktime_get();
2021         int rc = 0;
2022
2023         ENTRY;
2024
2025         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2026                "valid %x, hsm_import %d\n",
2027                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2028                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2029                hsm_import);
2030
2031         if (attr->ia_valid & ATTR_SIZE) {
2032                 /* Check new size against VFS/VM file size limit and rlimit */
2033                 rc = inode_newsize_ok(inode, attr->ia_size);
2034                 if (rc)
2035                         RETURN(rc);
2036
2037                 /* The maximum Lustre file size is variable, based on the
2038                  * OST maximum object size and number of stripes.  This
2039                  * needs another check in addition to the VFS check above. */
2040                 if (attr->ia_size > ll_file_maxbytes(inode)) {
2041                         CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
2042                                PFID(&lli->lli_fid), attr->ia_size,
2043                                ll_file_maxbytes(inode));
2044                         RETURN(-EFBIG);
2045                 }
2046
2047                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2048         }
2049
2050         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
2051         if (attr->ia_valid & TIMES_SET_FLAGS) {
2052                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2053                     !cfs_capable(CAP_FOWNER))
2054                         RETURN(-EPERM);
2055         }
2056
2057         /* We mark all of the fields "set" so MDS/OST does not re-set them */
2058         if (!(xvalid & OP_XVALID_CTIME_SET) &&
2059              (attr->ia_valid & ATTR_CTIME)) {
2060                 attr->ia_ctime = current_time(inode);
2061                 xvalid |= OP_XVALID_CTIME_SET;
2062         }
2063         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2064             (attr->ia_valid & ATTR_ATIME)) {
2065                 attr->ia_atime = current_time(inode);
2066                 attr->ia_valid |= ATTR_ATIME_SET;
2067         }
2068         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2069             (attr->ia_valid & ATTR_MTIME)) {
2070                 attr->ia_mtime = current_time(inode);
2071                 attr->ia_valid |= ATTR_MTIME_SET;
2072         }
2073
2074         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2075                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2076                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2077                        ktime_get_real_seconds());
2078
2079         if (S_ISREG(inode->i_mode))
2080                 inode_unlock(inode);
2081
2082         /* We always do an MDS RPC, even if we're only changing the size;
2083          * only the MDS knows whether truncate() should fail with -ETXTBSY. */
2084
2085         OBD_ALLOC_PTR(op_data);
2086         if (op_data == NULL)
2087                 GOTO(out, rc = -ENOMEM);
2088
2089         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2090                 /* If we are changing the file size, the file content is
2091                  * modified; flag it.
2092                  */
2093                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2094                 op_data->op_bias |= MDS_DATA_MODIFIED;
2095                 ll_file_clear_flag(lli, LLIF_DATA_MODIFIED);
2096         }
2097
2098         if (attr->ia_valid & ATTR_FILE) {
2099                 struct ll_file_data *fd = attr->ia_file->private_data;
2100
2101                 if (fd->fd_lease_och)
2102                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2103         }
2104
2105         op_data->op_attr = *attr;
2106         op_data->op_xvalid = xvalid;
2107
2108         rc = ll_md_setattr(dentry, op_data);
2109         if (rc)
2110                 GOTO(out, rc);
2111
2112         if (!S_ISREG(inode->i_mode) || hsm_import)
2113                 GOTO(out, rc = 0);
2114
2115         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2116                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2117             xvalid & OP_XVALID_CTIME_SET) {
2118                 bool cached = false;
2119
2120                 rc = pcc_inode_setattr(inode, attr, &cached);
2121                 if (cached) {
2122                         if (rc) {
2123                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2124                                        "rc = %d\n",
2125                                        ll_i2sbi(inode)->ll_fsname,
2126                                        PFID(&lli->lli_fid), rc);
2127                                 GOTO(out, rc);
2128                         }
2129                 } else {
2130                         unsigned int flags = 0;
2131
2132                         /* For truncate and utimes that send attributes to the
2133                          * OSTs, setting mtime/atime to the past is performed
2134                          * under a PW [0:EOF] extent lock (new_size:EOF for
2135                          * truncate). It may seem excessive to send mtime/atime
2136                          * updates to the OSTs when not setting times to the past,
2137                          * but it is necessary due to possible time
2138                          * de-synchronization between the MDT inode and OST objects
2139                          */
2140                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2141                             attr->ia_valid & ATTR_SIZE) {
2142                                 xvalid |= OP_XVALID_FLAGS;
2143                                 flags = LUSTRE_ENCRYPT_FL;
2144                                 /* A call to ll_io_zero_page() is not necessary
2145                                  * if truncating on a PAGE_SIZE boundary, because
2146                                  * whole pages will be wiped.
2147                                  * In the case of Direct IO, all we need is to
2148                                  * set the new size.
2149                                  */
2150                                 if (attr->ia_size & ~PAGE_MASK &&
2151                                     !(attr->ia_valid & ATTR_FILE &&
2152                                       attr->ia_file->f_flags & O_DIRECT)) {
2153                                         pgoff_t offset =
2154                                                 attr->ia_size & (PAGE_SIZE - 1);
2155
2156                                         rc = ll_io_zero_page(inode,
2157                                                     attr->ia_size >> PAGE_SHIFT,
2158                                                     offset, PAGE_SIZE - offset);
2159                                         if (rc)
2160                                                 GOTO(out, rc);
2161                                 }
2162                         }
2163                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2164                 }
2165         }
2166
2167         /* If the file was restored, it needs to set the dirty flag.
2168          *
2169          * We've already sent MDS_DATA_MODIFIED flag in
2170          * ll_md_setattr() for truncate. However, the MDT refuses to
2171          * set the HS_DIRTY flag on released files, so we have to set
2172          * it again if the file has been restored. Please check how
2173          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2174          *
2175          * Note that if the file is not released, the previous
2176          * MDS_DATA_MODIFIED has taken effect and usually
2177          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2178          * This way we can save an RPC for common open + trunc
2179          * operation. */
2180         if (ll_file_test_and_clear_flag(lli, LLIF_DATA_MODIFIED)) {
2181                 struct hsm_state_set hss = {
2182                         .hss_valid = HSS_SETMASK,
2183                         .hss_setmask = HS_DIRTY,
2184                 };
2185                 int rc2;
2186
2187                 rc2 = ll_hsm_state_set(inode, &hss);
2188                 /* Truncate and write can happen at the same time, so the
2189                  * file can be marked modified even though it was not
2190                  * restored from the released state; ll_hsm_state_set() is
2191                  * then not applicable to the file, and rc2 < 0 is normal
2192                  * in this case. */
2193                 if (rc2 < 0)
2194                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2195                                PFID(ll_inode2fid(inode)), rc2);
2196         }
2197
2198         EXIT;
2199 out:
2200         if (op_data != NULL)
2201                 ll_finish_md_op_data(op_data);
2202
2203         if (S_ISREG(inode->i_mode)) {
2204                 inode_lock(inode);
2205                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2206                         inode_dio_wait(inode);
2207                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2208                  * flag.  ll_update_inode (called from ll_md_setattr) clears
2209                  * inode flags, so there is a gap where S_NOSEC is not set.
2210                  * This can cause a writer to take the i_mutex unnecessarily,
2211                  * but this is safe to do and should be rare. */
2212                 inode_has_no_xattr(inode);
2213         }
2214
2215         if (!rc)
2216                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2217                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2218                                    ktime_us_delta(ktime_get(), kstart));
2219
2220         return rc;
2221 }
2222
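/* VFS ->setattr entry point: let llcrypt veto the change first, apply the
 * usual ATTR_FORCE/ATTR_KILL_SUID/ATTR_KILL_SGID conventions for mode and
 * size changes, then hand off to ll_setattr_raw().
 */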
2223 int ll_setattr(struct dentry *de, struct iattr *attr)
2224 {
2225         int mode = de->d_inode->i_mode;
2226         enum op_xvalid xvalid = 0;
2227         int rc;
2228
2229         rc = llcrypt_prepare_setattr(de, attr);
2230         if (rc)
2231                 return rc;
2232
2233         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2234                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2235                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2236
2237         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2238                                (ATTR_SIZE|ATTR_MODE)) &&
2239             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2240              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2241               !(attr->ia_mode & S_ISGID))))
2242                 attr->ia_valid |= ATTR_FORCE;
2243
2244         if ((attr->ia_valid & ATTR_MODE) &&
2245             (mode & S_ISUID) &&
2246             !(attr->ia_mode & S_ISUID) &&
2247             !(attr->ia_valid & ATTR_KILL_SUID))
2248                 attr->ia_valid |= ATTR_KILL_SUID;
2249
2250         if ((attr->ia_valid & ATTR_MODE) &&
2251             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2252             !(attr->ia_mode & S_ISGID) &&
2253             !(attr->ia_valid & ATTR_KILL_SGID))
2254                 attr->ia_valid |= ATTR_KILL_SGID;
2255
2256         return ll_setattr_raw(de, attr, xvalid, false);
2257 }
2258
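/* Fetch cluster-wide statistics: statfs the MDT first (the result may already
 * be a cluster-wide sum), then overlay the OST space counters and clamp the
 * file counts so that the "inodes in use" number stays consistent.
 */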
2259 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2260                        u32 flags)
2261 {
2262         struct obd_statfs obd_osfs = { 0 };
2263         time64_t max_age;
2264         int rc;
2265
2266         ENTRY;
2267         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2268
2269         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2270                 flags |= OBD_STATFS_NODELAY;
2271
2272         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2273         if (rc)
2274                 RETURN(rc);
2275
2276         osfs->os_type = LL_SUPER_MAGIC;
2277
2278         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2279               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2280
2281         if (osfs->os_state & OS_STATFS_SUM)
2282                 GOTO(out, rc);
2283
2284         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2285         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2286                 GOTO(out, rc = 0);
2287
2288         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2289                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2290                obd_osfs.os_files);
2291
2292         osfs->os_bsize = obd_osfs.os_bsize;
2293         osfs->os_blocks = obd_osfs.os_blocks;
2294         osfs->os_bfree = obd_osfs.os_bfree;
2295         osfs->os_bavail = obd_osfs.os_bavail;
2296
2297         /* If we have _some_ OSTs, but don't have as many free objects on the
2298          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2299          * to compensate, so that the "inodes in use" number is correct.
2300          * This should be kept in sync with lod_statfs() behaviour.
2301          */
2302         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2303                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2304                                  obd_osfs.os_ffree;
2305                 osfs->os_ffree = obd_osfs.os_ffree;
2306         }
2307
2308 out:
2309         RETURN(rc);
2310 }
2311
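/* If the inode carries a project ID with quota limits, cap the reported block
 * and inode counts at those limits so that statfs on a project directory
 * reflects the project quota rather than the whole filesystem.
 */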
2312 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2313 {
2314         struct if_quotactl qctl = {
2315                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2316                 .qc_type = PRJQUOTA,
2317                 .qc_valid = QC_GENERAL,
2318         };
2319         u64 limit, curblock;
2320         int ret;
2321
2322         qctl.qc_id = ll_i2info(inode)->lli_projid;
2323         ret = quotactl_ioctl(ll_i2sbi(inode), &qctl);
2324         if (ret) {
2325                 /* ignore errors if the project ID does not have
2326                  * a quota limit or the feature is unsupported.
2327                  */
2328                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2329                         ret = 0;
2330                 return ret;
2331         }
2332
2333         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2334                  qctl.qc_dqblk.dqb_bsoftlimit :
2335                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2336         if (limit && sfs->f_blocks > limit) {
2337                 curblock = (qctl.qc_dqblk.dqb_curspace +
2338                                 sfs->f_bsize - 1) / sfs->f_bsize;
2339                 sfs->f_blocks = limit;
2340                 sfs->f_bfree = sfs->f_bavail =
2341                         (sfs->f_blocks > curblock) ?
2342                         (sfs->f_blocks - curblock) : 0;
2343         }
2344
2345         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2346                 qctl.qc_dqblk.dqb_isoftlimit :
2347                 qctl.qc_dqblk.dqb_ihardlimit;
2348         if (limit && sfs->f_files > limit) {
2349                 sfs->f_files = limit;
2350                 sfs->f_ffree = (sfs->f_files >
2351                         qctl.qc_dqblk.dqb_curinodes) ?
2352                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2353         }
2354
2355         return 0;
2356 }
2357
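/* VFS ->statfs: unpack the cached cluster-wide statistics into *sfs, shrink
 * the block counts on 32-bit kernels, and apply project quota limits when the
 * inode belongs to a project.
 */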
2358 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2359 {
2360         struct super_block *sb = de->d_sb;
2361         struct obd_statfs osfs;
2362         __u64 fsid = huge_encode_dev(sb->s_dev);
2363         ktime_t kstart = ktime_get();
2364         int rc;
2365
2366         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2367
2368         /* Some amount of caching on the client is allowed */
2369         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2370         if (rc)
2371                 return rc;
2372
2373         statfs_unpack(sfs, &osfs);
2374
2375         /* We need to downshift for all 32-bit kernels, because we can't
2376          * tell if the kernel is being called via sys_statfs64() or not.
2377          * Stop before f_bsize overflows; at that point it is better
2378          * to just risk EOVERFLOW if the caller is using the old sys_statfs(). */
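        /* e.g. with a 4KB f_bsize, 2^33 blocks (32TB) are reported as
         * 2^31 blocks of 16KB after two iterations, which fits in an
         * unsigned 32-bit long. */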
2379         if (sizeof(long) < 8) {
2380                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2381                         sfs->f_bsize <<= 1;
2382
2383                         osfs.os_blocks >>= 1;
2384                         osfs.os_bfree >>= 1;
2385                         osfs.os_bavail >>= 1;
2386                 }
2387         }
2388
2389         sfs->f_blocks = osfs.os_blocks;
2390         sfs->f_bfree = osfs.os_bfree;
2391         sfs->f_bavail = osfs.os_bavail;
2392         sfs->f_fsid.val[0] = (__u32)fsid;
2393         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2394         if (ll_i2info(de->d_inode)->lli_projid)
2395                 return ll_statfs_project(de->d_inode, sfs);
2396
2397         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2398                            ktime_us_delta(ktime_get(), kstart));
2399
2400         return 0;
2401 }
2402
2403 void ll_inode_size_lock(struct inode *inode)
2404 {
2405         struct ll_inode_info *lli;
2406
2407         LASSERT(!S_ISDIR(inode->i_mode));
2408
2409         lli = ll_i2info(inode);
2410         mutex_lock(&lli->lli_size_mutex);
2411 }
2412
2413 void ll_inode_size_unlock(struct inode *inode)
2414 {
2415         struct ll_inode_info *lli;
2416
2417         lli = ll_i2info(inode);
2418         mutex_unlock(&lli->lli_size_mutex);
2419 }
2420
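/* Convert on-wire LUSTRE_*_FL flags into VFS inode flags, preserving the
 * locally tracked encryption flag, and mirror LUSTRE_PROJINHERIT_FL into the
 * lli flags.
 */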
2421 void ll_update_inode_flags(struct inode *inode, int ext_flags)
2422 {
2423         /* do not clear encryption flag */
2424         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2425         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2426         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2427                 ll_file_set_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
2428         else
2429                 ll_file_clear_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
2430 }
2431
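/* Apply the attributes from an MD reply to the VFS inode: file layout,
 * directory striping, ACLs, ownership, timestamps (never moved backwards),
 * size/blocks and the HSM restore state.
 */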
2432 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2433 {
2434         struct ll_inode_info *lli = ll_i2info(inode);
2435         struct mdt_body *body = md->body;
2436         struct ll_sb_info *sbi = ll_i2sbi(inode);
2437         int rc = 0;
2438
2439         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2440                 rc = cl_file_inode_init(inode, md);
2441                 if (rc)
2442                         return rc;
2443         }
2444
2445         if (S_ISDIR(inode->i_mode)) {
2446                 rc = ll_update_lsm_md(inode, md);
2447                 if (rc != 0)
2448                         return rc;
2449         }
2450
2451         if (body->mbo_valid & OBD_MD_FLACL)
2452                 lli_replace_acl(lli, md);
2453
2454         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
2455                                         sbi->ll_flags & LL_SBI_32BIT_API);
2456         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2457
2458         if (body->mbo_valid & OBD_MD_FLATIME) {
2459                 if (body->mbo_atime > inode->i_atime.tv_sec)
2460                         inode->i_atime.tv_sec = body->mbo_atime;
2461                 lli->lli_atime = body->mbo_atime;
2462         }
2463
2464         if (body->mbo_valid & OBD_MD_FLMTIME) {
2465                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2466                         CDEBUG(D_INODE,
2467                                "setting ino %lu mtime from %lld to %llu\n",
2468                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2469                                body->mbo_mtime);
2470                         inode->i_mtime.tv_sec = body->mbo_mtime;
2471                 }
2472                 lli->lli_mtime = body->mbo_mtime;
2473         }
2474
2475         if (body->mbo_valid & OBD_MD_FLCTIME) {
2476                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2477                         inode->i_ctime.tv_sec = body->mbo_ctime;
2478                 lli->lli_ctime = body->mbo_ctime;
2479         }
2480
2481         if (body->mbo_valid & OBD_MD_FLBTIME)
2482                 lli->lli_btime = body->mbo_btime;
2483
2484         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2485         if (body->mbo_valid & OBD_MD_FLFLAGS)
2486                 ll_update_inode_flags(inode, body->mbo_flags);
2487         if (body->mbo_valid & OBD_MD_FLMODE)
2488                 inode->i_mode = (inode->i_mode & S_IFMT) |
2489                                 (body->mbo_mode & ~S_IFMT);
2490
2491         if (body->mbo_valid & OBD_MD_FLTYPE)
2492                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2493                                 (body->mbo_mode & S_IFMT);
2494
2495         LASSERT(inode->i_mode != 0);
2496         if (body->mbo_valid & OBD_MD_FLUID)
2497                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2498         if (body->mbo_valid & OBD_MD_FLGID)
2499                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2500         if (body->mbo_valid & OBD_MD_FLPROJID)
2501                 lli->lli_projid = body->mbo_projid;
2502         if (body->mbo_valid & OBD_MD_FLNLINK)
2503                 set_nlink(inode, body->mbo_nlink);
2504         if (body->mbo_valid & OBD_MD_FLRDEV)
2505                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2506
2507         if (body->mbo_valid & OBD_MD_FLID) {
2508                 /* FID shouldn't be changed! */
2509                 if (fid_is_sane(&lli->lli_fid)) {
2510                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2511                                  "Trying to change FID "DFID
2512                                  " to the "DFID", inode "DFID"(%p)\n",
2513                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2514                                  PFID(ll_inode2fid(inode)), inode);
2515                 } else {
2516                         lli->lli_fid = body->mbo_fid1;
2517                 }
2518         }
2519
2520         LASSERT(fid_seq(&lli->lli_fid) != 0);
2521
2522         lli->lli_attr_valid = body->mbo_valid;
2523         if (body->mbo_valid & OBD_MD_FLSIZE) {
2524                 i_size_write(inode, body->mbo_size);
2525
2526                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2527                        PFID(ll_inode2fid(inode)),
2528                        (unsigned long long)body->mbo_size);
2529
2530                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2531                         inode->i_blocks = body->mbo_blocks;
2532         } else {
2533                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2534                         lli->lli_lazysize = body->mbo_size;
2535                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2536                         lli->lli_lazyblocks = body->mbo_blocks;
2537         }
2538
2539         if (body->mbo_valid & OBD_MD_TSTATE) {
2540                 /* Set LLIF_FILE_RESTORING while a restore is ongoing and
2541                  * clear it when done, to ensure we start glimpsing
2542                  * updated attrs again
2543                  */
2544                 if (body->mbo_t_state & MS_RESTORE)
2545                         ll_file_set_flag(lli, LLIF_FILE_RESTORING);
2546                 else
2547                         ll_file_clear_flag(lli, LLIF_FILE_RESTORING);
2548         }
2549
2550         return 0;
2551 }
2552
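/* Initialize a freshly allocated inode from the MD reply: zero the timestamps
 * so that ll_update_inode() only moves them forward, then install the inode,
 * file and address-space operations matching the file type.
 */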
2553 int ll_read_inode2(struct inode *inode, void *opaque)
2554 {
2555         struct lustre_md *md = opaque;
2556         struct ll_inode_info *lli = ll_i2info(inode);
2557         int     rc;
2558         ENTRY;
2559
2560         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2561                PFID(&lli->lli_fid), inode);
2562
2563         /* Core attributes from the MDS first.  This is a new inode, and
2564          * the VFS doesn't zero times in the core inode so we have to do
2565          * it ourselves.  They will be overwritten by either MDS or OST
2566          * attributes - we just need to make sure they aren't newer.
2567          */
2568         inode->i_mtime.tv_sec = 0;
2569         inode->i_atime.tv_sec = 0;
2570         inode->i_ctime.tv_sec = 0;
2571         inode->i_rdev = 0;
2572         rc = ll_update_inode(inode, md);
2573         if (rc != 0)
2574                 RETURN(rc);
2575
2576         /* OIDEBUG(inode); */
2577
2578 #ifdef HAVE_BACKING_DEV_INFO
2579         /* initializing backing dev info. */
2580         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2581 #endif
2582         if (S_ISREG(inode->i_mode)) {
2583                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2584                 inode->i_op = &ll_file_inode_operations;
2585                 inode->i_fop = sbi->ll_fop;
2586                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2587                 EXIT;
2588         } else if (S_ISDIR(inode->i_mode)) {
2589                 inode->i_op = &ll_dir_inode_operations;
2590                 inode->i_fop = &ll_dir_operations;
2591                 EXIT;
2592         } else if (S_ISLNK(inode->i_mode)) {
2593                 inode->i_op = &ll_fast_symlink_inode_operations;
2594                 EXIT;
2595         } else {
2596                 inode->i_op = &ll_special_inode_operations;
2597
2598                 init_special_inode(inode, inode->i_mode,
2599                                    inode->i_rdev);
2600
2601                 EXIT;
2602         }
2603
2604         return 0;
2605 }
2606
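/* Final inode eviction: flush dirty pages (or discard them if the file was
 * unlinked), truncate the page cache, re-check nrpages under the mapping lock
 * to tolerate the LU-118 race, then clear the llite and VFS inode state.
 */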
2607 void ll_delete_inode(struct inode *inode)
2608 {
2609         struct ll_inode_info *lli = ll_i2info(inode);
2610         struct address_space *mapping = &inode->i_data;
2611         unsigned long nrpages;
2612         unsigned long flags;
2613
2614         ENTRY;
2615
2616         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2617                 /* This is the last chance to write out dirty pages;
2618                  * otherwise we may lose data during umount.
2619                  *
2620                  * If i_nlink is 0 then just discard the data. This is safe
2621                  * because the local inode gets i_nlink 0 from the server only
2622                  * for the last unlink, so the file is not open anywhere else.
2623                  */
2624                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2625                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2626         }
2627         truncate_inode_pages_final(mapping);
2628
2629         /* Workaround for LU-118: Note nrpages may not be totally updated when
2630          * truncate_inode_pages() returns, as there can be a page in the process
2631          * of deletion (inside __delete_from_page_cache()) in the specified
2632          * range. Thus mapping->nrpages can be non-zero when this function
2633          * returns even after truncation of the whole mapping.  Only do this if
2634          * nrpages isn't already zero.
2635          */
2636         nrpages = mapping->nrpages;
2637         if (nrpages) {
2638                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2639                 nrpages = mapping->nrpages;
2640                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2641         } /* Workaround end */
2642
2643         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2644                  "see https://jira.whamcloud.com/browse/LU-118\n",
2645                  ll_i2sbi(inode)->ll_fsname,
2646                  PFID(ll_inode2fid(inode)), inode, nrpages);
2647
2648         ll_clear_inode(inode);
2649         clear_inode(inode);
2650
2651         EXIT;
2652 }
2653
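/* Handle FS_IOC_GETFLAGS/FS_IOC_SETFLAGS: the flags live on the MDS, so GET
 * issues a getattr RPC and SET a setattr RPC; on SET the new flags are also
 * pushed to the OST objects through cl_setattr_ost().
 */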
2654 int ll_iocontrol(struct inode *inode, struct file *file,
2655                  unsigned int cmd, unsigned long arg)
2656 {
2657         struct ll_sb_info *sbi = ll_i2sbi(inode);
2658         struct ptlrpc_request *req = NULL;
2659         int rc, flags = 0;
2660         ENTRY;
2661
2662         switch (cmd) {
2663         case FS_IOC_GETFLAGS: {
2664                 struct mdt_body *body;
2665                 struct md_op_data *op_data;
2666
2667                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2668                                              0, 0, LUSTRE_OPC_ANY,
2669                                              NULL);
2670                 if (IS_ERR(op_data))
2671                         RETURN(PTR_ERR(op_data));
2672
2673                 op_data->op_valid = OBD_MD_FLFLAGS;
2674                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2675                 ll_finish_md_op_data(op_data);
2676                 if (rc) {
2677                         CERROR("%s: failure inode "DFID": rc = %d\n",
2678                                sbi->ll_md_exp->exp_obd->obd_name,
2679                                PFID(ll_inode2fid(inode)), rc);
2680                         RETURN(-abs(rc));
2681                 }
2682
2683                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2684
2685                 flags = body->mbo_flags;
2686
2687                 ptlrpc_req_finished(req);
2688
2689                 RETURN(put_user(flags, (int __user *)arg));
2690         }
2691         case FS_IOC_SETFLAGS: {
2692                 struct iattr *attr;
2693                 struct md_op_data *op_data;
2694                 struct cl_object *obj;
2695                 struct fsxattr fa = { 0 };
2696
2697                 if (get_user(flags, (int __user *)arg))
2698                         RETURN(-EFAULT);
2699
2700                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2701                 if (flags & LUSTRE_PROJINHERIT_FL)
2702                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2703
2704                 rc = ll_ioctl_check_project(inode, &fa);
2705                 if (rc)
2706                         RETURN(rc);
2707
2708                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2709                                              LUSTRE_OPC_ANY, NULL);
2710                 if (IS_ERR(op_data))
2711                         RETURN(PTR_ERR(op_data));
2712
2713                 op_data->op_attr_flags = flags;
2714                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2715                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2716                 ll_finish_md_op_data(op_data);
2717                 ptlrpc_req_finished(req);
2718                 if (rc)
2719                         RETURN(rc);
2720
2721                 ll_update_inode_flags(inode, flags);
2722
2723                 obj = ll_i2info(inode)->lli_clob;
2724                 if (obj == NULL)
2725                         RETURN(0);
2726
2727                 OBD_ALLOC_PTR(attr);
2728                 if (attr == NULL)
2729                         RETURN(-ENOMEM);
2730
2731                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2732
2733                 OBD_FREE_PTR(attr);
2734                 RETURN(rc);
2735         }
2736         default:
2737                 RETURN(-ENOSYS);
2738         }
2739
2740         RETURN(0);
2741 }
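
/*
 * Hedged userspace sketch (not part of this file): the FS_IOC_GETFLAGS and
 * FS_IOC_SETFLAGS cases above service the standard attribute-flag ioctls on
 * a Lustre file.  Assuming fd is open on a regular file in a Lustre mount,
 * a round-trip caller might look like:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int flags;
 *
 *	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)	// handled above
 *		return -errno;
 *	// modify flags as needed; which flags are honored depends on the
 *	// Lustre version and server support
 *	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)	// handled above
 *		return -errno;
 */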
2742
2743 int ll_flush_ctx(struct inode *inode)
2744 {
2745         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2746
2747         CDEBUG(D_SEC, "flush context for user %d\n",
2748                from_kuid(&init_user_ns, current_uid()));
2749
2750         obd_set_info_async(NULL, sbi->ll_md_exp,
2751                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2752                            0, NULL, NULL);
2753         obd_set_info_async(NULL, sbi->ll_dt_exp,
2754                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2755                            0, NULL, NULL);
2756         return 0;
2757 }
2758
2759 /* "umount -f" on the client means force down; don't save state */
2760 void ll_umount_begin(struct super_block *sb)
2761 {
2762         struct ll_sb_info *sbi = ll_s2sbi(sb);
2763         struct obd_device *obd;
2764         struct obd_ioctl_data *ioc_data;
2765         int cnt;
2766         ENTRY;
2767
2768         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2769                sb->s_count, atomic_read(&sb->s_active));
2770
2771         obd = class_exp2obd(sbi->ll_md_exp);
2772         if (obd == NULL) {
2773                 CERROR("Invalid MDC connection handle %#llx\n",
2774                        sbi->ll_md_exp->exp_handle.h_cookie);
2775                 EXIT;
2776                 return;
2777         }
2778         obd->obd_force = 1;
2779
2780         obd = class_exp2obd(sbi->ll_dt_exp);
2781         if (obd == NULL) {
2782                 CERROR("Invalid LOV connection handle %#llx\n",
2783                        sbi->ll_dt_exp->exp_handle.h_cookie);
2784                 EXIT;
2785                 return;
2786         }
2787         obd->obd_force = 1;
2788
2789         OBD_ALLOC_PTR(ioc_data);
2790         if (ioc_data) {
2791                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2792                               sizeof(*ioc_data), ioc_data, NULL);
2793
2794                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2795                               sizeof(*ioc_data), ioc_data, NULL);
2796
2797                 OBD_FREE_PTR(ioc_data);
2798         }
2799
2800         /* Really, we'd like to wait until there are no requests outstanding,
2801          * and then continue.  For now, we just periodically check whether the
2802          * VFS has dropped its mount references and hope to finish within 10s.
2803          */
2804         cnt = 10;
2805         while (cnt > 0 &&
2806                !may_umount(sbi->ll_mnt.mnt)) {
2807                 ssleep(1);
2808                 cnt -= 1;
2809         }
2810
2811         EXIT;
2812 }
2813
2814 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2815 {
2816         struct ll_sb_info *sbi = ll_s2sbi(sb);
2817         char *profilenm = get_profile_name(sb);
2818         int err;
2819         __u32 read_only;
2820
2821         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2822                 read_only = *flags & MS_RDONLY;
2823                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2824                                          sizeof(KEY_READ_ONLY),
2825                                          KEY_READ_ONLY, sizeof(read_only),
2826                                          &read_only, NULL);
2827                 if (err) {
2828                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2829                                       profilenm, read_only ?
2830                                       "read-only" : "read-write", err);
2831                         return err;
2832                 }
2833
2834                 if (read_only)
2835                         sb->s_flags |= SB_RDONLY;
2836                 else
2837                         sb->s_flags &= ~SB_RDONLY;
2838
2839                 if (sbi->ll_flags & LL_SBI_VERBOSE)
2840                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2841                                       read_only ?  "read-only" : "read-write");
2842         }
2843         return 0;
2844 }
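
/*
 * Hedged userspace sketch (not part of this file): ll_remount_fs() is what
 * eventually services a remount such as "mount -o remount,ro /mnt/lustre".
 * Assuming /mnt/lustre is a Lustre mount point, the equivalent direct call
 * would be:
 *
 *	#include <sys/mount.h>
 *
 *	// switch the client mount to read-only; KEY_READ_ONLY is then sent
 *	// to the MDC export as seen above
 *	if (mount(NULL, "/mnt/lustre", NULL, MS_REMOUNT | MS_RDONLY, NULL) < 0)
 *		return -errno;
 */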
2845
2846 /**
2847  * Clean up an open handle that is cached on the MDT side.
2848  *
2849  * In the open case, the client-side open handling thread may hit an
2850  * error after the MDT has granted the open. In that case the client
2851  * should send a close RPC to the MDT as cleanup; otherwise the open
2852  * handle is leaked on the MDT until the client unmounts or is evicted.
2853  *
2854  * Furthermore, if someone unlinks the file, the open handle still
2855  * holds a reference on the file/object, which blocks subsequent
2856  * threads that want to locate the object by FID.
2857  *
2858  * \param[in] sb        super block for this file system
2859  * \param[in] open_req  pointer to the original open request
2860  */
2861 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2862 {
2863         struct mdt_body                 *body;
2864         struct md_op_data               *op_data;
2865         struct ptlrpc_request           *close_req = NULL;
2866         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2867         ENTRY;
2868
2869         body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
2870         OBD_ALLOC_PTR(op_data);
2871         if (op_data == NULL) {
2872                 CWARN("%s: cannot allocate op_data to release open handle for "
2873                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2874
2875                 RETURN_EXIT;
2876         }
2877
2878         op_data->op_fid1 = body->mbo_fid1;
2879         op_data->op_open_handle = body->mbo_open_handle;
2880         op_data->op_mod_time = ktime_get_real_seconds();
2881         md_close(exp, op_data, NULL, &close_req);
2882         ptlrpc_req_finished(close_req);
2883         ll_finish_md_op_data(op_data);
2884
2885         EXIT;
2886 }
2887
2888 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2889                   struct super_block *sb, struct lookup_intent *it)
2890 {
2891         struct ll_sb_info *sbi = NULL;
2892         struct lustre_md md = { NULL };
2893         bool default_lmv_deleted = false;
2894         int rc;
2895
2896         ENTRY;
2897
2898         LASSERT(*inode || sb);
2899         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2900         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2901                               sbi->ll_md_exp, &md);
2902         if (rc != 0)
2903                 GOTO(out, rc);
2904
2905         /*
2906          * Clear default_lmv only if the intent_getattr reply doesn't contain
2907          * it. This must be done after iget, but check it early because
2908          * ll_update_lsm_md() may change md.
2909          */
2910         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2911             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2912                 default_lmv_deleted = true;
2913
2914         if (*inode) {
2915                 rc = ll_update_inode(*inode, &md);
2916                 if (rc != 0)
2917                         GOTO(out, rc);
2918         } else {
2919                 LASSERT(sb != NULL);
2920
2921                 /*
2922                  * At this point the server returns the same FID that the
2923                  * client generated for the create, so using ->fid1 is okay.
2924                  */
2925                 if (!fid_is_sane(&md.body->mbo_fid1)) {
2926                         CERROR("%s: Fid is insane "DFID"\n",
2927                                 sbi->ll_fsname,
2928                                 PFID(&md.body->mbo_fid1));
2929                         GOTO(out, rc = -EINVAL);
2930                 }
2931
2932                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2933                                              sbi->ll_flags & LL_SBI_32BIT_API),
2934                                  &md);
2935                 if (IS_ERR(*inode)) {
2936                         lmd_clear_acl(&md);
2937                         rc = PTR_ERR(*inode);
2938                         *inode = NULL;
2939                         CERROR("new_inode -fatal: rc %d\n", rc);
2940                         GOTO(out, rc);
2941                 }
2942         }
2943
2944         /* Handle a piggybacked layout lock.
2945          * A layout lock can be piggybacked on getattr and open requests.
2946          * The lsm may only be applied to the inode if it comes with a layout
2947          * lock, otherwise a correct layout may be overwritten, for example:
2948          * 1. proc1: MDT returns an lsm but does not grant the layout lock
2949          * 2. the layout is changed by another client
2950          * 3. proc2: refreshes the layout and the layout lock is granted
2951          * 4. proc1: applies the now-stale layout */
2952         if (it != NULL && it->it_lock_mode != 0) {
2953                 struct lustre_handle lockh;
2954                 struct ldlm_lock *lock;
2955
2956                 lockh.cookie = it->it_lock_handle;
2957                 lock = ldlm_handle2lock(&lockh);
2958                 LASSERT(lock != NULL);
2959                 if (ldlm_has_layout(lock)) {
2960                         struct cl_object_conf conf;
2961
2962                         memset(&conf, 0, sizeof(conf));
2963                         conf.coc_opc = OBJECT_CONF_SET;
2964                         conf.coc_inode = *inode;
2965                         conf.coc_lock = lock;
2966                         conf.u.coc_layout = md.layout;
2967                         (void)ll_layout_conf(*inode, &conf);
2968                 }
2969                 LDLM_LOCK_PUT(lock);
2970         }
2971
2972         if (default_lmv_deleted)
2973                 ll_update_default_lsm_md(*inode, &md);
2974
2975         /* we may want to apply some policy for foreign file/dir */
2976         if (ll_sbi_has_foreign_symlink(sbi)) {
2977                 rc = ll_manage_foreign(*inode, &md);
2978                 if (rc < 0)
2979                         GOTO(out, rc);
2980         }
2981
2982         GOTO(out, rc = 0);
2983
2984 out:
2985         /* cleanup will be done if necessary */
2986         md_free_lustre_md(sbi->ll_md_exp, &md);
2987
2988         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
2989                 ll_intent_drop_lock(it);
2990                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req);
2991         }
2992
2993         return rc;
2994 }
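
/*
 * Hedged caller sketch (illustration only): ll_prep_inode() is typically
 * invoked from a lookup/open reply path with the server reply and the
 * lookup intent; the surrounding variable names below are assumptions:
 *
 *	struct inode *inode = NULL;
 *
 *	rc = ll_prep_inode(&inode, request, parent->i_sb, it);
 *	if (rc)
 *		return rc;
 *	// on success *inode has been instantiated or updated from the reply,
 *	// including any piggybacked layout lock handled above
 */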
2995
2996 int ll_obd_statfs(struct inode *inode, void __user *arg)
2997 {
2998         struct ll_sb_info *sbi = NULL;
2999         struct obd_export *exp;
3000         struct obd_ioctl_data *data = NULL;
3001         __u32 type;
3002         int len = 0, rc;
3003
3004         if (inode)
3005                 sbi = ll_i2sbi(inode);
3006         if (!sbi)
3007                 GOTO(out_statfs, rc = -EINVAL);
3008
3009         rc = obd_ioctl_getdata(&data, &len, arg);
3010         if (rc)
3011                 GOTO(out_statfs, rc);
3012
3013         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3014             !data->ioc_pbuf1 || !data->ioc_pbuf2)
3015                 GOTO(out_statfs, rc = -EINVAL);
3016
3017         if (data->ioc_inllen1 != sizeof(__u32) ||
3018             data->ioc_inllen2 != sizeof(__u32) ||
3019             data->ioc_plen1 != sizeof(struct obd_statfs) ||
3020             data->ioc_plen2 != sizeof(struct obd_uuid))
3021                 GOTO(out_statfs, rc = -EINVAL);
3022
3023         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3024         if (type & LL_STATFS_LMV)
3025                 exp = sbi->ll_md_exp;
3026         else if (type & LL_STATFS_LOV)
3027                 exp = sbi->ll_dt_exp;
3028         else
3029                 GOTO(out_statfs, rc = -ENODEV);
3030
3031         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3032         if (rc)
3033                 GOTO(out_statfs, rc);
3034 out_statfs:
3035         OBD_FREE_LARGE(data, len);
3036         return rc;
3037 }
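
/*
 * Hedged note on the ioctl payload (matching the checks above): the packed
 * obd_ioctl_data is expected to carry
 *
 *	ioc_inlbuf1/ioc_inllen1 - __u32 type (LL_STATFS_LMV or LL_STATFS_LOV)
 *	ioc_inlbuf2/ioc_inllen2 - __u32 (typically the target index)
 *	ioc_pbuf1/ioc_plen1     - struct obd_statfs reply buffer
 *	ioc_pbuf2/ioc_plen2     - struct obd_uuid reply buffer
 *
 * Userspace normally goes through llapi_obd_statfs() rather than packing
 * this structure by hand.
 */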
3038
3039 /*
3040  * This is normally called from ll_finish_md_op_data(), but sometimes it needs
3041  * to be called early to avoid a deadlock.
3042  */
3043 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3044 {
3045         if (op_data->op_mea2_sem) {
3046                 up_read_non_owner(op_data->op_mea2_sem);
3047                 op_data->op_mea2_sem = NULL;
3048         }
3049
3050         if (op_data->op_mea1_sem) {
3051                 up_read_non_owner(op_data->op_mea1_sem);
3052                 op_data->op_mea1_sem = NULL;
3053         }
3054 }
3055
3056 /* Prepare the md_op_data hint to pass down to the MD stack (caller sketch below). */
3057 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3058                                       struct inode *i1, struct inode *i2,
3059                                       const char *name, size_t namelen,
3060                                       __u32 mode, enum md_op_code opc,
3061                                       void *data)
3062 {
3063         LASSERT(i1 != NULL);
3064
3065         if (name == NULL) {
3066                 /* Do not reuse namelen for something else. */
3067                 if (namelen != 0)
3068                         return ERR_PTR(-EINVAL);
3069         } else {
3070                 if (namelen > ll_i2sbi(i1)->ll_namelen)
3071                         return ERR_PTR(-ENAMETOOLONG);
3072
3073                 /* "/" is not a valid name, but it is allowed here */
3074                 if (!lu_name_is_valid_2(name, namelen) &&
3075                     strncmp("/", name, namelen) != 0)
3076                         return ERR_PTR(-EINVAL);
3077         }
3078
3079         if (op_data == NULL)
3080                 OBD_ALLOC_PTR(op_data);
3081
3082         if (op_data == NULL)
3083                 return ERR_PTR(-ENOMEM);
3084
3085         ll_i2gids(op_data->op_suppgids, i1, i2);
3086         op_data->op_fid1 = *ll_inode2fid(i1);
3087         op_data->op_code = opc;
3088
3089         if (S_ISDIR(i1->i_mode)) {
3090                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3091                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3092                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3093                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3094         }
3095
3096         if (i2) {
3097                 op_data->op_fid2 = *ll_inode2fid(i2);
3098                 if (S_ISDIR(i2->i_mode)) {
3099                         if (i2 != i1) {
3100                                 /* i2 is typically a child of i1, and MUST be
3101                                  * further from the root to avoid deadlocks.
3102                                  */
3103                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3104                                 op_data->op_mea2_sem =
3105                                                 &ll_i2info(i2)->lli_lsm_sem;
3106                         }
3107                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3108                 }
3109         } else {
3110                 fid_zero(&op_data->op_fid2);
3111         }
3112
3113         if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
3114                 op_data->op_cli_flags |= CLI_HASH64;
3115
3116         if (ll_need_32bit_api(ll_i2sbi(i1)))
3117                 op_data->op_cli_flags |= CLI_API32;
3118
3119         op_data->op_name = name;
3120         op_data->op_namelen = namelen;
3121         op_data->op_mode = mode;
3122         op_data->op_mod_time = ktime_get_real_seconds();
3123         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3124         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3125         op_data->op_cap = cfs_curproc_cap_pack();
3126         op_data->op_mds = 0;
3127         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3128              filename_is_volatile(name, namelen, &op_data->op_mds)) {
3129                 op_data->op_bias |= MDS_CREATE_VOLATILE;
3130         }
3131         op_data->op_data = data;
3132
3133         return op_data;
3134 }
3135
3136 void ll_finish_md_op_data(struct md_op_data *op_data)
3137 {
3138         ll_unlock_md_op_lsm(op_data);
3139         security_release_secctx(op_data->op_file_secctx,
3140                                 op_data->op_file_secctx_size);
3141         llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3142         OBD_FREE_PTR(op_data);
3143 }
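
/*
 * Hedged caller sketch (illustration only), mirroring the FS_IOC_GETFLAGS
 * path in ll_iocontrol() above: prepare the op_data, issue the MD request,
 * then release it with ll_finish_md_op_data():
 *
 *	struct md_op_data *op_data;
 *	struct ptlrpc_request *req = NULL;
 *	int rc;
 *
 *	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		return PTR_ERR(op_data);
 *
 *	op_data->op_valid = OBD_MD_FLFLAGS;
 *	rc = md_getattr(ll_i2sbi(inode)->ll_md_exp, op_data, &req);
 *	ll_finish_md_op_data(op_data);
 */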
3144
3145 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3146 {
3147         struct ll_sb_info *sbi;
3148
3149         LASSERT(seq && dentry);
3150         sbi = ll_s2sbi(dentry->d_sb);
3151
3152         if (sbi->ll_flags & LL_SBI_NOLCK)
3153                 seq_puts(seq, ",nolock");
3154
3155         /* "flock" has been the default since 2.13, but it wasn't for many
3156          * years, so it is still useful to print it to show it is enabled.
3157          * Also print "noflock" so it is clear when flock is disabled.
3158          */
3159         if (sbi->ll_flags & LL_SBI_FLOCK)
3160                 seq_puts(seq, ",flock");
3161         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
3162                 seq_puts(seq, ",localflock");
3163         else
3164                 seq_puts(seq, ",noflock");
3165
3166         if (sbi->ll_flags & LL_SBI_USER_XATTR)
3167                 seq_puts(seq, ",user_xattr");
3168
3169         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
3170                 seq_puts(seq, ",lazystatfs");
3171
3172         if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
3173                 seq_puts(seq, ",user_fid2path");
3174
3175         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
3176                 seq_puts(seq, ",always_ping");
3177
3178         if (ll_sbi_has_test_dummy_encryption(sbi))
3179                 seq_puts(seq, ",test_dummy_encryption");
3180
3181         if (ll_sbi_has_encrypt(sbi))
3182                 seq_puts(seq, ",encrypt");
3183         else
3184                 seq_puts(seq, ",noencrypt");
3185
3186         if (sbi->ll_flags & LL_SBI_FOREIGN_SYMLINK) {
3187                 seq_puts(seq, ",foreign_symlink=");
3188                 seq_puts(seq, sbi->ll_foreign_symlink_prefix);
3189         }
3190
3191         RETURN(0);
3192 }
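
/*
 * Hedged example (hypothetical values): for a client mounted read-write with
 * user_xattr and lazystatfs enabled, the options emitted above appear in
 * /proc/mounts roughly as:
 *
 *	192.0.2.1@tcp:/lustre /mnt/lustre lustre rw,flock,user_xattr,lazystatfs 0 0
 */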
3193
3194 /**
3195  * Get the obd name selected by cmd and copy it out to user space.
3196  */
3197 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3198 {
3199         struct ll_sb_info *sbi = ll_i2sbi(inode);
3200         struct obd_device *obd;
3201         ENTRY;
3202
3203         if (cmd == OBD_IOC_GETDTNAME)
3204                 obd = class_exp2obd(sbi->ll_dt_exp);
3205         else if (cmd == OBD_IOC_GETMDNAME)
3206                 obd = class_exp2obd(sbi->ll_md_exp);
3207         else
3208                 RETURN(-EINVAL);
3209
3210         if (!obd)
3211                 RETURN(-ENOENT);
3212
3213         if (copy_to_user((void __user *)arg, obd->obd_name,
3214                          strlen(obd->obd_name) + 1))
3215                 RETURN(-EFAULT);
3216
3217         RETURN(0);
3218 }
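
/*
 * Hedged userspace sketch (not part of this file), assuming the ioctl
 * definitions from the Lustre user API headers: an application holding a
 * file descriptor on a Lustre mount can retrieve the names copied out above:
 *
 *	char name[128];		// must fit obd_name plus the trailing NUL
 *
 *	if (ioctl(fd, OBD_IOC_GETDTNAME, name) < 0)	// data (LOV) name
 *		return -errno;
 *	if (ioctl(fd, OBD_IOC_GETMDNAME, name) < 0)	// metadata (LMV) name
 *		return -errno;
 */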
3219
3220 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3221 {
3222         char *path = NULL;
3223         struct path p;
3224
3226         p.dentry = dentry;
3227         p.mnt = current->fs->root.mnt;
3228         path_get(&p);
3229         path = d_path(&p, buf, bufsize);
3230         path_put(&p);
3231         return path;
3232 }
3233
3234 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3235 {
3236         char *buf, *path = NULL;
3237         struct dentry *dentry = NULL;
3238         struct inode *inode = page->mapping->host;
3239
3240         /* This can be called inside a spinlock, so use GFP_ATOMIC. */
3241         buf = (char *)__get_free_page(GFP_ATOMIC);
3242         if (buf != NULL) {
3243                 dentry = d_find_alias(page->mapping->host);
3244                 if (dentry != NULL)
3245                         path = ll_d_path(dentry, buf, PAGE_SIZE);
3246         }
3247
3248         /* The below message is checked in recovery-small.sh test_24b */
3249         CDEBUG(D_WARNING,
3250                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3251                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3252                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3253                PFID(ll_inode2fid(inode)),
3254                (path && !IS_ERR(path)) ? path : "", ioret);
3255
3256         if (dentry != NULL)
3257                 dput(dentry);
3258
3259         if (buf != NULL)
3260                 free_page((unsigned long)buf);
3261 }
3262
3263 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3264                         struct lov_user_md **kbuf)
3265 {
3266         struct lov_user_md      lum;
3267         ssize_t                 lum_size;
3268         ENTRY;
3269
3270         if (copy_from_user(&lum, md, sizeof(lum)))
3271                 RETURN(-EFAULT);
3272
3273         lum_size = ll_lov_user_md_size(&lum);
3274         if (lum_size < 0)
3275                 RETURN(lum_size);
3276
3277         OBD_ALLOC_LARGE(*kbuf, lum_size);
3278         if (*kbuf == NULL)
3279                 RETURN(-ENOMEM);
3280
3281         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3282                 OBD_FREE_LARGE(*kbuf, lum_size);
3283                 RETURN(-EFAULT);
3284         }
3285
3286         RETURN(lum_size);
3287 }
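
/*
 * Hedged caller sketch (illustration only): an ioctl handler uses
 * ll_copy_user_md() to pull a variable-sized lov_user_md from userspace and
 * must free it with the size that was returned:
 *
 *	struct lov_user_md *klum;
 *	ssize_t lum_size;
 *
 *	lum_size = ll_copy_user_md(user_lum, &klum);	// user_lum: __user ptr
 *	if (lum_size < 0)
 *		return lum_size;
 *	// ... use klum ...
 *	OBD_FREE_LARGE(klum, lum_size);
 */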
3288
3289 /*
3290  * Compute the llite root squash state after a change of the root squash
3291  * configuration settings or the addition/removal of an LNet NID.
3292  */
3293 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3294 {
3295         struct root_squash_info *squash = &sbi->ll_squash;
3296         int i;
3297         bool matched;
3298         struct lnet_process_id id;
3299
3300         /* Update norootsquash flag */
3301         spin_lock(&squash->rsi_lock);
3302         if (list_empty(&squash->rsi_nosquash_nids))
3303                 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3304         else {
3305                 /* Do not apply root squash if at least one of our NIDs is
3306                  * in the nosquash_nids list */
3307                 matched = false;
3308                 i = 0;
3309                 while (LNetGetId(i++, &id) != -ENOENT) {
3310                         if (id.nid == LNET_NID_LO_0)
3311                                 continue;
3312                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3313                                 matched = true;
3314                                 break;
3315                         }
3316                 }
3317                 if (matched)
3318                         sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
3319                 else
3320                         sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3321         }
3322         spin_unlock(&squash->rsi_lock);
3323 }
3324
3325 /**
3326  * Parse linkea content to extract information about a given hardlink
3327  *
3328  * \param[in]   ldata      - Initialized linkea data
3329  * \param[in]   linkno     - Link identifier
3330  * \param[out]  parent_fid - The entry's parent FID
3331  * \param[out]  ln         - Entry name destination buffer
3332  *
3333  * \retval 0 on success
3334  * \retval Appropriate negative error code on failure
3335  */
3336 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3337                             struct lu_fid *parent_fid, struct lu_name *ln)
3338 {
3339         unsigned int    idx;
3340         int             rc;
3341         ENTRY;
3342
3343         rc = linkea_init_with_rec(ldata);
3344         if (rc < 0)
3345                 RETURN(rc);
3346
3347         if (linkno >= ldata->ld_leh->leh_reccount)
3348                 /* beyond last link */
3349                 RETURN(-ENODATA);
3350
3351         linkea_first_entry(ldata);
3352         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3353                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3354                                     parent_fid);
3355                 if (idx == linkno)
3356                         break;
3357
3358                 linkea_next_entry(ldata);
3359         }
3360
3361         if (idx < linkno)
3362                 RETURN(-ENODATA);
3363
3364         RETURN(0);
3365 }
3366
3367 /**
3368  * Get the parent FID and name of an identified link. The operation is
3369  * performed for a given link number, letting the caller iterate over linkno
3370  * to list one or all links of an entry.
3371  *
3372  * \param[in]     file - File descriptor against which to perform the operation
3373  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3374  *                       on and the available size. It is eventually filled with
3375  *                       the requested information or left untouched on error
3376  *
3377  * \retval - 0 on success
3378  * \retval - Appropriate negative error code on failure
3379  */
3380 int ll_getparent(struct file *file, struct getparent __user *arg)
3381 {
3382         struct inode            *inode = file_inode(file);
3383         struct linkea_data      *ldata;
3384         struct lu_buf            buf = LU_BUF_NULL;
3385         struct lu_name           ln;
3386         struct lu_fid            parent_fid;
3387         __u32                    linkno;
3388         __u32                    name_size;
3389         int                      rc;
3390
3391         ENTRY;
3392
3393         if (!cfs_capable(CAP_DAC_READ_SEARCH) &&
3394             !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
3395                 RETURN(-EPERM);
3396
3397         if (get_user(name_size, &arg->gp_name_size))
3398                 RETURN(-EFAULT);
3399
3400         if (get_user(linkno, &arg->gp_linkno))
3401                 RETURN(-EFAULT);
3402
3403         if (name_size > PATH_MAX)
3404                 RETURN(-EINVAL);
3405
3406         OBD_ALLOC(ldata, sizeof(*ldata));
3407         if (ldata == NULL)
3408                 RETURN(-ENOMEM);
3409
3410         rc = linkea_data_new(ldata, &buf);
3411         if (rc < 0)
3412                 GOTO(ldata_free, rc);
3413
3414         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3415                            buf.lb_len, OBD_MD_FLXATTR);
3416         if (rc < 0)
3417                 GOTO(lb_free, rc);
3418
3419         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3420         if (rc < 0)
3421                 GOTO(lb_free, rc);
3422
3423         if (ln.ln_namelen >= name_size)
3424                 GOTO(lb_free, rc = -EOVERFLOW);
3425
3426         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3427                 GOTO(lb_free, rc = -EFAULT);
3428
3429         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3430                 GOTO(lb_free, rc = -EFAULT);
3431
3432         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3433                 GOTO(lb_free, rc = -EFAULT);
3434
3435 lb_free:
3436         lu_buf_free(&buf);
3437 ldata_free:
3438         OBD_FREE(ldata, sizeof(*ldata));
3439
3440         RETURN(rc);
3441 }
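
/*
 * Hedged userspace sketch (not part of this file), assuming struct getparent
 * and the LL_IOC_GETPARENT ioctl from the Lustre user API headers; the
 * llapi_fd2parent()/llapi_path2parent() helpers normally wrap this:
 *
 *	struct getparent *gp;
 *	size_t name_size = NAME_MAX + 1;
 *
 *	gp = malloc(sizeof(*gp) + name_size);
 *	if (gp == NULL)
 *		return -ENOMEM;
 *	gp->gp_linkno = 0;		// first hardlink
 *	gp->gp_name_size = name_size;
 *	if (ioctl(fd, LL_IOC_GETPARENT, gp) < 0)
 *		return -errno;
 *	// gp->gp_fid and gp->gp_name now hold the parent FID and entry name
 */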