LU-14004 llite: default lsm update may memory leak
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/llite/llite_lib.c
32  *
33  * Lustre Light Super operations
34  */
35
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include <linux/cpu.h>
39 #include <linux/module.h>
40 #include <linux/random.h>
41 #include <linux/statfs.h>
42 #include <linux/time.h>
43 #include <linux/types.h>
44 #include <libcfs/linux/linux-uuid.h>
45 #include <linux/version.h>
46 #include <linux/mm.h>
47 #include <linux/user_namespace.h>
48 #include <linux/delay.h>
49 #include <linux/uidgid.h>
50 #include <linux/fs_struct.h>
51
52 #ifndef HAVE_CPUS_READ_LOCK
53 #include <libcfs/linux/linux-cpu.h>
54 #endif
55 #include <uapi/linux/lustre/lustre_ioctl.h>
56 #ifdef HAVE_UAPI_LINUX_MOUNT_H
57 #include <uapi/linux/mount.h>
58 #endif
59
60 #include <lustre_ha.h>
61 #include <lustre_dlm.h>
62 #include <lprocfs_status.h>
63 #include <lustre_disk.h>
64 #include <uapi/linux/lustre/lustre_param.h>
65 #include <lustre_log.h>
66 #include <cl_object.h>
67 #include <obd_cksum.h>
68 #include "llite_internal.h"
69
70 struct kmem_cache *ll_file_data_slab;
71
72 #ifndef log2
73 #define log2(n) ffz(~(n))
74 #endif
75
76 /**
77  * If there is only one core visible to Lustre, async readahead
78  * is disabled.  To avoid massive oversubscription, use 1/2 of the
79  * active cores as the default maximum number of concurrent async
80  * readahead requests.
81  */
82 static inline unsigned int ll_get_ra_async_max_active(void)
83 {
84         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
85 }
86
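/**
 * Allocate and initialize a new ll_sb_info with default tunables:
 * PCC state, the readahead workqueue, the client LRU page cache,
 * foreign symlink defaults, readahead/statahead limits, root squash
 * and file heat settings.
 *
 * \retval pointer      new ll_sb_info on success
 * \retval ERR_PTR      negated errno on failure (partial state is freed)
 */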
87 static struct ll_sb_info *ll_init_sbi(void)
88 {
89         struct ll_sb_info *sbi = NULL;
90         unsigned long pages;
91         unsigned long lru_page_max;
92         struct sysinfo si;
93         int rc;
94         int i;
95
96         ENTRY;
97
98         OBD_ALLOC_PTR(sbi);
99         if (sbi == NULL)
100                 RETURN(ERR_PTR(-ENOMEM));
101
102         rc = pcc_super_init(&sbi->ll_pcc_super);
103         if (rc < 0)
104                 GOTO(out_sbi, rc);
105
106         spin_lock_init(&sbi->ll_lock);
107         mutex_init(&sbi->ll_lco.lco_lock);
108         spin_lock_init(&sbi->ll_pp_extent_lock);
109         spin_lock_init(&sbi->ll_process_lock);
110         sbi->ll_rw_stats_on = 0;
111         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
112
113         si_meminfo(&si);
114         pages = si.totalram - si.totalhigh;
115         lru_page_max = pages / 2;
116
117         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
118         sbi->ll_ra_info.ll_readahead_wq =
119                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
120                                        0, CFS_CPT_ANY,
121                                        sbi->ll_ra_info.ra_async_max_active);
122         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
123                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
124
125         /* initialize ll_cache data */
126         sbi->ll_cache = cl_cache_init(lru_page_max);
127         if (sbi->ll_cache == NULL)
128                 GOTO(out_destroy_ra, rc = -ENOMEM);
129
130         /* initialize foreign symlink prefix path */
131         OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
132         if (sbi->ll_foreign_symlink_prefix == NULL)
133                 GOTO(out_destroy_ra, rc = -ENOMEM);
134         memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
135         sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
136
137         /* initialize foreign symlink upcall path, none by default */
138         OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
139         if (sbi->ll_foreign_symlink_upcall == NULL)
140                 GOTO(out_destroy_ra, rc = -ENOMEM);
141         memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
142         sbi->ll_foreign_symlink_upcall_items = NULL;
143         sbi->ll_foreign_symlink_upcall_nb_items = 0;
144         init_rwsem(&sbi->ll_foreign_symlink_sem);
145         /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
146          * not enabled by default
147          */
148
149         sbi->ll_ra_info.ra_max_pages =
150                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
151         sbi->ll_ra_info.ra_max_pages_per_file =
152                 min(sbi->ll_ra_info.ra_max_pages / 4,
153                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
154         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
155                                 sbi->ll_ra_info.ra_max_pages_per_file;
156         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
157         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
158         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
159
160         sbi->ll_flags |= LL_SBI_VERBOSE;
161 #ifdef ENABLE_CHECKSUM
162         sbi->ll_flags |= LL_SBI_CHECKSUM;
163 #endif
164 #ifdef ENABLE_FLOCK
165         sbi->ll_flags |= LL_SBI_FLOCK;
166 #endif
167
168 #ifdef HAVE_LRU_RESIZE_SUPPORT
169         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
170 #endif
171         sbi->ll_flags |= LL_SBI_LAZYSTATFS;
172
173         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
174                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
175                                pp_r_hist.oh_lock);
176                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
177                                pp_w_hist.oh_lock);
178         }
179
180         /* metadata statahead is enabled by default */
181         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
182         sbi->ll_sa_max = LL_SA_RPC_DEF;
183         atomic_set(&sbi->ll_sa_total, 0);
184         atomic_set(&sbi->ll_sa_wrong, 0);
185         atomic_set(&sbi->ll_sa_running, 0);
186         atomic_set(&sbi->ll_agl_total, 0);
187         sbi->ll_flags |= LL_SBI_AGL_ENABLED;
188         sbi->ll_flags |= LL_SBI_FAST_READ;
189         sbi->ll_flags |= LL_SBI_TINY_WRITE;
190         ll_sbi_set_encrypt(sbi, true);
191
192         /* root squash */
193         sbi->ll_squash.rsi_uid = 0;
194         sbi->ll_squash.rsi_gid = 0;
195         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
196         spin_lock_init(&sbi->ll_squash.rsi_lock);
197
198         /* Per-filesystem file heat */
199         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
200         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
201
202         /* Per-fs open heat level before requesting open lock */
203         sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT;
204         sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS;
205         sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS;
206         RETURN(sbi);
207 out_destroy_ra:
208         if (sbi->ll_foreign_symlink_prefix)
209                 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
210         if (sbi->ll_cache) {
211                 cl_cache_decref(sbi->ll_cache);
212                 sbi->ll_cache = NULL;
213         }
214         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
215 out_pcc:
216         pcc_super_fini(&sbi->ll_pcc_super);
217 out_sbi:
218         OBD_FREE_PTR(sbi);
219         RETURN(ERR_PTR(rc));
220 }
221
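/**
 * Release everything attached to the ll_sb_info of \a sb that was set
 * up by ll_init_sbi() or mount option parsing: the nosquash NID list,
 * readahead workqueue, LRU cache reference, foreign symlink buffers,
 * PCC state and finally the ll_sb_info itself.
 */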
222 static void ll_free_sbi(struct super_block *sb)
223 {
224         struct ll_sb_info *sbi = ll_s2sbi(sb);
225         ENTRY;
226
227         if (sbi != NULL) {
228                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
229                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
230                 if (sbi->ll_ra_info.ll_readahead_wq)
231                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
232                 if (sbi->ll_cache != NULL) {
233                         cl_cache_decref(sbi->ll_cache);
234                         sbi->ll_cache = NULL;
235                 }
236                 if (sbi->ll_foreign_symlink_prefix) {
237                         OBD_FREE(sbi->ll_foreign_symlink_prefix,
238                                  sbi->ll_foreign_symlink_prefix_size);
239                         sbi->ll_foreign_symlink_prefix = NULL;
240                 }
241                 if (sbi->ll_foreign_symlink_upcall) {
242                         OBD_FREE(sbi->ll_foreign_symlink_upcall,
243                                  strlen(sbi->ll_foreign_symlink_upcall) +
244                                        1);
245                         sbi->ll_foreign_symlink_upcall = NULL;
246                 }
247                 if (sbi->ll_foreign_symlink_upcall_items) {
248                         int i;
249                         int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
250                         struct ll_foreign_symlink_upcall_item *items =
251                                 sbi->ll_foreign_symlink_upcall_items;
252
253                         for (i = 0 ; i < nb_items; i++)
254                                 if (items[i].type == STRING_TYPE)
255                                         OBD_FREE(items[i].string,
256                                                        items[i].size);
257
258                         OBD_FREE_LARGE(items, nb_items *
259                                 sizeof(struct ll_foreign_symlink_upcall_item));
260                         sbi->ll_foreign_symlink_upcall_items = NULL;
261                 }
262                 pcc_super_fini(&sbi->ll_pcc_super);
263                 OBD_FREE(sbi, sizeof(*sbi));
264         }
265         EXIT;
266 }
267
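/**
 * Connect this client to its metadata (\a md) and data (\a dt) obd
 * devices, negotiate connect flags with both, fetch the root FID,
 * build the root inode and dentry, and finish generic setup of \a sb.
 *
 * \retval 0            on success
 * \retval negative     negated errno on failure; connections and FID
 *                      clients set up so far are torn down
 */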
268 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
269 {
270         struct inode *root = NULL;
271         struct ll_sb_info *sbi = ll_s2sbi(sb);
272         struct obd_statfs *osfs = NULL;
273         struct ptlrpc_request *request = NULL;
274         struct obd_connect_data *data = NULL;
275         struct obd_uuid *uuid;
276         struct md_op_data *op_data;
277         struct lustre_md lmd;
278         u64 valid;
279         int size, err, checksum;
280
281         ENTRY;
282         sbi->ll_md_obd = class_name2obd(md);
283         if (!sbi->ll_md_obd) {
284                 CERROR("MD %s: not setup or attached\n", md);
285                 RETURN(-EINVAL);
286         }
287
288         OBD_ALLOC_PTR(data);
289         if (data == NULL)
290                 RETURN(-ENOMEM);
291
292         OBD_ALLOC_PTR(osfs);
293         if (osfs == NULL) {
294                 OBD_FREE_PTR(data);
295                 RETURN(-ENOMEM);
296         }
297
298         /* pass the client page size via ocd_grant_blkbits; the server reports
299          * back its backend blocksize for grant calculation purposes */
300         data->ocd_grant_blkbits = PAGE_SHIFT;
301
302         /* indicate MDT features supported by this client */
303         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
304                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
305                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
306                                   OBD_CONNECT_SRVLOCK  | OBD_CONNECT_TRUNCLOCK|
307                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
308                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
309                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
310                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
311                                   OBD_CONNECT_64BITHASH |
312                                   OBD_CONNECT_EINPROGRESS |
313                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
314                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
315                                   OBD_CONNECT_MAX_EASIZE |
316                                   OBD_CONNECT_FLOCK_DEAD |
317                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
318                                   OBD_CONNECT_OPEN_BY_FID |
319                                   OBD_CONNECT_DIR_STRIPE |
320                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
321                                   OBD_CONNECT_SUBTREE |
322                                   OBD_CONNECT_MULTIMODRPCS |
323                                   OBD_CONNECT_GRANT_PARAM |
324                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
325
326         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
327                                    OBD_CONNECT2_SUM_STATFS |
328                                    OBD_CONNECT2_OVERSTRIPING |
329                                    OBD_CONNECT2_FLR |
330                                    OBD_CONNECT2_LOCK_CONVERT |
331                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
332                                    OBD_CONNECT2_INC_XID |
333                                    OBD_CONNECT2_LSOM |
334                                    OBD_CONNECT2_ASYNC_DISCARD |
335                                    OBD_CONNECT2_PCC |
336                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
337                                    OBD_CONNECT2_GETATTR_PFID |
338                                    OBD_CONNECT2_DOM_LVB;
339
340 #ifdef HAVE_LRU_RESIZE_SUPPORT
341         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
342                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
343 #endif
344         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
345
346         data->ocd_cksum_types = obd_cksum_types_supported_client();
347
348         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
349                 /* flag mdc connection as lightweight, only used for test
350                  * purposes, use with care */
351                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
352
353         data->ocd_ibits_known = MDS_INODELOCK_FULL;
354         data->ocd_version = LUSTRE_VERSION_CODE;
355
356         if (sb->s_flags & SB_RDONLY)
357                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
358         if (sbi->ll_flags & LL_SBI_USER_XATTR)
359                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
360
361 #ifdef SB_NOSEC
362         /* Setting this indicates we correctly support S_NOSEC (See kernel
363          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
364          */
365         sb->s_flags |= SB_NOSEC;
366 #endif
367         sbi->ll_fop = ll_select_file_operations(sbi);
368
369         /* always ping even if server suppress_pings */
370         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
371                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
372
373         obd_connect_set_secctx(data);
374         if (ll_sbi_has_encrypt(sbi))
375                 obd_connect_set_enc(data);
376
377 #if defined(CONFIG_SECURITY)
378         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
379 #endif
380
381         data->ocd_brw_size = MD_MAX_BRW_SIZE;
382
383         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
384                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
385         if (err == -EBUSY) {
386                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
387                                    "recovery, of which this client is not a "
388                                    "part. Please wait for recovery to complete,"
389                                    " abort, or time out.\n", md);
390                 GOTO(out, err);
391         } else if (err) {
392                 CERROR("cannot connect to %s: rc = %d\n", md, err);
393                 GOTO(out, err);
394         }
395
396         sbi->ll_md_exp->exp_connect_data = *data;
397
398         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
399                            LUSTRE_SEQ_METADATA);
400         if (err) {
401                 CERROR("%s: Can't init metadata layer FID infrastructure, "
402                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
403                 GOTO(out_md, err);
404         }
405
406         /* For mount we only need fs info from MDT0; in DNE this also
407          * ensures the client can be mounted as long as MDT0 is
408          * available */
409         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
410                         ktime_get_seconds() - sbi->ll_statfs_max_age,
411                         OBD_STATFS_FOR_MDT0);
412         if (err)
413                 GOTO(out_md_fid, err);
414
415         /* This needs to be after statfs to ensure connect has finished.
416          * Note that "data" does NOT contain the valid connect reply.
417          * If connecting to a 1.8 server there will be no LMV device, so
418          * we can access the MDC export directly and exp_connect_flags will
419          * be non-zero, but if accessing an upgraded 2.1 server it will
420          * have the correct flags filled in.
421          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
422         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
423         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
424             valid != CLIENT_CONNECT_MDT_REQD) {
425                 char *buf;
426
427                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
428                 obd_connect_flags2str(buf, PAGE_SIZE,
429                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
430                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
431                                    "feature(s) needed for correct operation "
432                                    "of this client (%s). Please upgrade "
433                                    "server or downgrade client.\n",
434                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
435                 OBD_FREE(buf, PAGE_SIZE);
436                 GOTO(out_md_fid, err = -EPROTO);
437         }
438
439         size = sizeof(*data);
440         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
441                            KEY_CONN_DATA,  &size, data);
442         if (err) {
443                 CERROR("%s: Get connect data failed: rc = %d\n",
444                        sbi->ll_md_exp->exp_obd->obd_name, err);
445                 GOTO(out_md_fid, err);
446         }
447
448         LASSERT(osfs->os_bsize);
449         sb->s_blocksize = osfs->os_bsize;
450         sb->s_blocksize_bits = log2(osfs->os_bsize);
451         sb->s_magic = LL_SUPER_MAGIC;
452         sb->s_maxbytes = MAX_LFS_FILESIZE;
453         sbi->ll_namelen = osfs->os_namelen;
454         sbi->ll_mnt.mnt = current->fs->root.mnt;
455
456         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
457             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
458                 LCONSOLE_INFO("Disabling user_xattr feature because "
459                               "it is not supported on the server\n");
460                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
461         }
462
463         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
464 #ifdef SB_POSIXACL
465                 sb->s_flags |= SB_POSIXACL;
466 #endif
467                 sbi->ll_flags |= LL_SBI_ACL;
468         } else {
469                 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
470 #ifdef SB_POSIXACL
471                 sb->s_flags &= ~SB_POSIXACL;
472 #endif
473                 sbi->ll_flags &= ~LL_SBI_ACL;
474         }
475
476         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
477                 sbi->ll_flags |= LL_SBI_64BIT_HASH;
478
479         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
480                 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
481
482         if (obd_connect_has_secctx(data))
483                 sbi->ll_flags |= LL_SBI_FILE_SECCTX;
484
485         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
486                 if (ll_sbi_has_test_dummy_encryption(sbi))
487                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
488                                       sbi->ll_fsname,
489                                       sbi->ll_md_exp->exp_obd->obd_name);
490                 ll_sbi_set_encrypt(sbi, false);
491         }
492
493         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
494                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
495                         LCONSOLE_INFO("%s: disabling xattr cache due to "
496                                       "unknown maximum xattr size.\n", dt);
497                 } else if (!sbi->ll_xattr_cache_set) {
498                         /* If xattr_cache was already set (to either 0 or 1)
499                          * while processing the llog, it is not enabled here. */
500                         sbi->ll_flags |= LL_SBI_XATTR_CACHE;
501                         sbi->ll_xattr_cache_enabled = 1;
502                 }
503         }
504
505         sbi->ll_dt_obd = class_name2obd(dt);
506         if (!sbi->ll_dt_obd) {
507                 CERROR("DT %s: not setup or attached\n", dt);
508                 GOTO(out_md_fid, err = -ENODEV);
509         }
510
511         /* pass the client page size via ocd_grant_blkbits; the server reports
512          * back its backend blocksize for grant calculation purposes */
513         data->ocd_grant_blkbits = PAGE_SHIFT;
514
515         /* indicate OST features supported by this client */
516         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
517                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
518                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
519                                   OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
520                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
521                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
522                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
523                                   OBD_CONNECT_EINPROGRESS |
524                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
525                                   OBD_CONNECT_LAYOUTLOCK |
526                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
527                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
528                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
529         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
530                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK;
531
532         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
533                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
534
535         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
536          * disabled by default, because it can still be enabled on the
537          * fly via /sys. As a consequence, we still need to come to an
538          * agreement on the supported algorithms at connect time
539          */
540         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
541
542         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
543                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
544         else
545                 data->ocd_cksum_types = obd_cksum_types_supported_client();
546
547 #ifdef HAVE_LRU_RESIZE_SUPPORT
548         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
549 #endif
550         /* always ping even if server suppress_pings */
551         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
552                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
553
554         if (ll_sbi_has_encrypt(sbi))
555                 obd_connect_set_enc(data);
556
557         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
558                "ocd_grant: %d\n", data->ocd_connect_flags,
559                data->ocd_version, data->ocd_grant);
560
561         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
562         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
563
564         data->ocd_brw_size = DT_MAX_BRW_SIZE;
565
566         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
567                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
568         if (err == -EBUSY) {
569                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
570                                    "recovery, of which this client is not a "
571                                    "part.  Please wait for recovery to "
572                                    "complete, abort, or time out.\n", dt);
573                 GOTO(out_md, err);
574         } else if (err) {
575                 CERROR("%s: Cannot connect to %s: rc = %d\n",
576                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
577                 GOTO(out_md, err);
578         }
579
580         if (ll_sbi_has_encrypt(sbi) &&
581             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
582                 if (ll_sbi_has_test_dummy_encryption(sbi))
583                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
584                                       sbi->ll_fsname, dt);
585                 ll_sbi_set_encrypt(sbi, false);
586         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
587                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
588         }
589
590         sbi->ll_dt_exp->exp_connect_data = *data;
591
592         /* Don't change value if it was specified in the config log */
593         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
594                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
595                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
596                               (data->ocd_brw_size >> PAGE_SHIFT));
597                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
598                     sbi->ll_ra_info.ra_max_pages_per_file)
599                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
600                                 sbi->ll_ra_info.ra_max_pages_per_file;
601         }
602
603         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
604                            LUSTRE_SEQ_METADATA);
605         if (err) {
606                 CERROR("%s: Can't init data layer FID infrastructure, "
607                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
608                 GOTO(out_dt, err);
609         }
610
611         mutex_lock(&sbi->ll_lco.lco_lock);
612         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
613         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
614         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
615         mutex_unlock(&sbi->ll_lco.lco_lock);
616
617         fid_zero(&sbi->ll_root_fid);
618         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
619                            &sbi->ll_root_fid);
620         if (err) {
621                 CERROR("cannot mds_connect: rc = %d\n", err);
622                 GOTO(out_lock_cn_cb, err);
623         }
624         if (!fid_is_sane(&sbi->ll_root_fid)) {
625                 CERROR("%s: Invalid root fid "DFID" during mount\n",
626                        sbi->ll_md_exp->exp_obd->obd_name,
627                        PFID(&sbi->ll_root_fid));
628                 GOTO(out_lock_cn_cb, err = -EINVAL);
629         }
630         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
631
632         sb->s_op = &lustre_super_operations;
633         sb->s_xattr = ll_xattr_handlers;
634 #if THREAD_SIZE >= 8192 /*b=17630*/
635         sb->s_export_op = &lustre_export_operations;
636 #endif
637 #ifdef HAVE_LUSTRE_CRYPTO
638         llcrypt_set_ops(sb, &lustre_cryptops);
639 #endif
640
641         /* make root inode
642          * XXX: move this to after cbd setup? */
643         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
644         if (sbi->ll_flags & LL_SBI_ACL)
645                 valid |= OBD_MD_FLACL;
646
647         OBD_ALLOC_PTR(op_data);
648         if (op_data == NULL)
649                 GOTO(out_lock_cn_cb, err = -ENOMEM);
650
651         op_data->op_fid1 = sbi->ll_root_fid;
652         op_data->op_mode = 0;
653         op_data->op_valid = valid;
654
655         err = md_getattr(sbi->ll_md_exp, op_data, &request);
656
657         OBD_FREE_PTR(op_data);
658         if (err) {
659                 CERROR("%s: md_getattr failed for root: rc = %d\n",
660                        sbi->ll_md_exp->exp_obd->obd_name, err);
661                 GOTO(out_lock_cn_cb, err);
662         }
663
664         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
665                                sbi->ll_md_exp, &lmd);
666         if (err) {
667                 CERROR("failed to understand root inode md: rc = %d\n", err);
668                 ptlrpc_req_finished(request);
669                 GOTO(out_lock_cn_cb, err);
670         }
671
672         LASSERT(fid_is_sane(&sbi->ll_root_fid));
673         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
674                                             sbi->ll_flags & LL_SBI_32BIT_API),
675                        &lmd);
676         md_free_lustre_md(sbi->ll_md_exp, &lmd);
677         ptlrpc_req_finished(request);
678
679         if (IS_ERR(root)) {
680                 lmd_clear_acl(&lmd);
681                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
682                 root = NULL;
683                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
684                        sbi->ll_fsname, err);
685                 GOTO(out_root, err);
686         }
687
688         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
689         if (sbi->ll_checksum_set) {
690                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
691                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
692                                          sizeof(checksum), &checksum, NULL);
693                 if (err) {
694                         CERROR("%s: Set checksum failed: rc = %d\n",
695                                sbi->ll_dt_exp->exp_obd->obd_name, err);
696                         GOTO(out_root, err);
697                 }
698         }
699         cl_sb_init(sb);
700
701         sb->s_root = d_make_root(root);
702         if (sb->s_root == NULL) {
703                 err = -ENOMEM;
704                 CERROR("%s: can't make root dentry: rc = %d\n",
705                        sbi->ll_fsname, err);
706                 GOTO(out_root, err);
707         }
708
709         sbi->ll_sdev_orig = sb->s_dev;
710
711         /* We set sb->s_dev equal on all lustre clients in order to support
712          * NFS export clustering.  NFSD requires that the FSID be the same
713          * on all clients. */
714         /* s_dev is also used in lt_compare() to compare two fs, but that is
715          * only a node-local comparison. */
716         uuid = obd_get_uuid(sbi->ll_md_exp);
717         if (uuid != NULL)
718                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
719
720         if (data != NULL)
721                 OBD_FREE_PTR(data);
722         if (osfs != NULL)
723                 OBD_FREE_PTR(osfs);
724
725         if (sbi->ll_dt_obd) {
726                 err = sysfs_create_link(&sbi->ll_kset.kobj,
727                                         &sbi->ll_dt_obd->obd_kset.kobj,
728                                         sbi->ll_dt_obd->obd_type->typ_name);
729                 if (err < 0) {
730                         CERROR("%s: could not register %s in llite: rc = %d\n",
731                                dt, sbi->ll_fsname, err);
732                         err = 0;
733                 }
734         }
735
736         if (sbi->ll_md_obd) {
737                 err = sysfs_create_link(&sbi->ll_kset.kobj,
738                                         &sbi->ll_md_obd->obd_kset.kobj,
739                                         sbi->ll_md_obd->obd_type->typ_name);
740                 if (err < 0) {
741                         CERROR("%s: could not register %s in llite: rc = %d\n",
742                                md, sbi->ll_fsname, err);
743                         err = 0;
744                 }
745         }
746
747         RETURN(err);
748 out_root:
749         iput(root);
750 out_lock_cn_cb:
751         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
752 out_dt:
753         obd_disconnect(sbi->ll_dt_exp);
754         sbi->ll_dt_exp = NULL;
755         sbi->ll_dt_obd = NULL;
756 out_md_fid:
757         obd_fid_fini(sbi->ll_md_exp->exp_obd);
758 out_md:
759         obd_disconnect(sbi->ll_md_exp);
760         sbi->ll_md_exp = NULL;
761         sbi->ll_md_obd = NULL;
762 out:
763         if (data != NULL)
764                 OBD_FREE_PTR(data);
765         if (osfs != NULL)
766                 OBD_FREE_PTR(osfs);
767         return err;
768 }
769
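/**
 * Query the maximum LOV EA size from the data export and then the
 * maximum EA size from the metadata export; the latter value is
 * returned through \a lmmsize.
 *
 * \retval 0            on success
 * \retval negative     negated errno on failure
 */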
770 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
771 {
772         int size, rc;
773
774         size = sizeof(*lmmsize);
775         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
776                           KEY_MAX_EASIZE, &size, lmmsize);
777         if (rc != 0) {
778                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
779                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
780                 RETURN(rc);
781         }
782
783         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
784
785         size = sizeof(int);
786         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
787                           KEY_MAX_EASIZE, &size, lmmsize);
788         if (rc)
789                 CERROR("Get max mdsize error rc %d\n", rc);
790
791         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
792
793         RETURN(rc);
794 }
795
796 /**
797  * Get the value of the default_easize parameter.
798  *
799  * \see client_obd::cl_default_mds_easize
800  *
801  * \param[in] sbi       superblock info for this filesystem
802  * \param[out] lmmsize  pointer to storage location for value
803  *
804  * \retval 0            on success
805  * \retval negative     negated errno on failure
806  */
807 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
808 {
809         int size, rc;
810
811         size = sizeof(int);
812         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
813                          KEY_DEFAULT_EASIZE, &size, lmmsize);
814         if (rc)
815                 CERROR("Get default mdsize error rc %d\n", rc);
816
817         RETURN(rc);
818 }
819
820 /**
821  * Set the default_easize parameter to the given value.
822  *
823  * \see client_obd::cl_default_mds_easize
824  *
825  * \param[in] sbi       superblock info for this filesystem
826  * \param[in] lmmsize   the size to set
827  *
828  * \retval 0            on success
829  * \retval negative     negated errno on failure
830  */
831 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
832 {
833         int rc;
834
835         if (lmmsize < sizeof(struct lov_mds_md) ||
836             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
837                 return -EINVAL;
838
839         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
840                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
841                                 sizeof(int), &lmmsize, NULL);
842
843         RETURN(rc);
844 }
845
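/**
 * Undo client_common_fill_super(): tear down cl_object state for
 * \a sb, unregister the debugfs entries, shut down the FID clients
 * and disconnect the data and metadata exports.
 */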
846 static void client_common_put_super(struct super_block *sb)
847 {
848         struct ll_sb_info *sbi = ll_s2sbi(sb);
849         ENTRY;
850
851         cl_sb_fini(sb);
852
853         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
854         obd_disconnect(sbi->ll_dt_exp);
855         sbi->ll_dt_exp = NULL;
856
857         ll_debugfs_unregister_super(sb);
858
859         obd_fid_fini(sbi->ll_md_exp->exp_obd);
860         obd_disconnect(sbi->ll_md_exp);
861         sbi->ll_md_exp = NULL;
862
863         EXIT;
864 }
865
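/**
 * Called while the superblock is being killed: restore the original
 * s_dev (changed for clustered NFS support) and wait for any running
 * statahead threads to finish before the real put_super runs.
 */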
866 void ll_kill_super(struct super_block *sb)
867 {
868         struct ll_sb_info *sbi;
869         ENTRY;
870
871         /* sb not initialized? */
872         if (!(sb->s_flags & SB_ACTIVE))
873                 return;
874
875         sbi = ll_s2sbi(sb);
876         /* restore s_dev, which was changed for clustered NFS, before put_super
877          * because newer kernels cache s_dev, and changing sb->s_dev in
878          * put_super no longer affects the real device removal */
879         if (sbi) {
880                 sb->s_dev = sbi->ll_sdev_orig;
881
882                 /* wait for running statahead threads to quit */
883                 while (atomic_read(&sbi->ll_sa_running) > 0)
884                         schedule_timeout_uninterruptible(
885                                 cfs_time_seconds(1) >> 3);
886         }
887
888         EXIT;
889 }
890
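/* return \a fl if the mount option \a data starts with \a opt, else 0 */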
891 static inline int ll_set_opt(const char *opt, char *data, int fl)
892 {
893         if (strncmp(opt, data, strlen(opt)) != 0)
894                 return 0;
895         else
896                 return fl;
897 }
898
899 /* non-client-specific mount options are parsed in lmd_parse */
900 static int ll_options(char *options, struct ll_sb_info *sbi)
901 {
902         int tmp;
903         char *s1 = options, *s2;
904         int *flags = &sbi->ll_flags;
905         ENTRY;
906
907         if (!options)
908                 RETURN(0);
909
910         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
911
912         while (*s1) {
913                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
914                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
915                 if (tmp) {
916                         *flags |= tmp;
917                         goto next;
918                 }
919                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
920                 if (tmp) {
921                         *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp;
922                         goto next;
923                 }
924                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
925                 if (tmp) {
926                         *flags = (*flags & ~LL_SBI_FLOCK) | tmp;
927                         goto next;
928                 }
929                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
930                 if (tmp) {
931                         *flags &= ~tmp;
932                         goto next;
933                 }
934                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
935                 if (tmp) {
936                         *flags |= tmp;
937                         goto next;
938                 }
939                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
940                 if (tmp) {
941                         *flags &= ~tmp;
942                         goto next;
943                 }
944                 tmp = ll_set_opt("context", s1, 1);
945                 if (tmp)
946                         goto next;
947                 tmp = ll_set_opt("fscontext", s1, 1);
948                 if (tmp)
949                         goto next;
950                 tmp = ll_set_opt("defcontext", s1, 1);
951                 if (tmp)
952                         goto next;
953                 tmp = ll_set_opt("rootcontext", s1, 1);
954                 if (tmp)
955                         goto next;
956                 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
957                 if (tmp) {
958                         *flags |= tmp;
959                         goto next;
960                 }
961                 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
962                 if (tmp) {
963                         *flags &= ~tmp;
964                         goto next;
965                 }
966
967                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
968                 if (tmp) {
969                         *flags |= tmp;
970                         sbi->ll_checksum_set = 1;
971                         goto next;
972                 }
973                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
974                 if (tmp) {
975                         *flags &= ~tmp;
976                         sbi->ll_checksum_set = 1;
977                         goto next;
978                 }
979                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
980                 if (tmp) {
981                         *flags |= tmp;
982                         goto next;
983                 }
984                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
985                 if (tmp) {
986                         *flags &= ~tmp;
987                         goto next;
988                 }
989                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
990                 if (tmp) {
991                         *flags |= tmp;
992                         goto next;
993                 }
994                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
995                 if (tmp) {
996                         *flags &= ~tmp;
997                         goto next;
998                 }
999                 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
1000                 if (tmp) {
1001                         *flags |= tmp;
1002                         goto next;
1003                 }
1004                 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
1005                 if (tmp) {
1006                         *flags |= tmp;
1007                         goto next;
1008                 }
1009                 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
1010                 if (tmp) {
1011                         *flags &= ~tmp;
1012                         goto next;
1013                 }
1014                 tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
1015                 if (tmp) {
1016                         *flags |= tmp;
1017                         goto next;
1018                 }
1019                 tmp = ll_set_opt("test_dummy_encryption", s1,
1020                                  LL_SBI_TEST_DUMMY_ENCRYPTION);
1021                 if (tmp) {
1022 #ifdef HAVE_LUSTRE_CRYPTO
1023                         *flags |= tmp;
1024 #else
1025                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1026 #endif
1027                         goto next;
1028                 }
1029                 tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT);
1030                 if (tmp) {
1031 #ifdef HAVE_LUSTRE_CRYPTO
1032                         *flags &= ~tmp;
1033 #else
1034                         LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n");
1035 #endif
1036                         goto next;
1037                 }
1038                 tmp = ll_set_opt("foreign_symlink", s1, LL_SBI_FOREIGN_SYMLINK);
1039                 if (tmp) {
1040                         int prefix_pos = sizeof("foreign_symlink=") - 1;
1041                         int equal_pos = sizeof("foreign_symlink=") - 2;
1042
1043                         /* non-default prefix provided ? */
1044                         if (strlen(s1) >= sizeof("foreign_symlink=") &&
1045                             *(s1 + equal_pos) == '=') {
1046                                 char *old = sbi->ll_foreign_symlink_prefix;
1047                                 size_t old_len =
1048                                         sbi->ll_foreign_symlink_prefix_size;
1049
1050                                 /* path must be absolute */
1051                                 if (*(s1 + sizeof("foreign_symlink=")
1052                                       - 1) != '/') {
1053                                         LCONSOLE_ERROR_MSG(0x152,
1054                                                 "foreign prefix '%s' must be an absolute path\n",
1055                                                 s1 + prefix_pos);
1056                                         RETURN(-EINVAL);
1057                                 }
1058                                 /* last option ? */
1059                                 s2 = strchrnul(s1 + prefix_pos, ',');
1060
1061                                 if (sbi->ll_foreign_symlink_prefix) {
1062                                         sbi->ll_foreign_symlink_prefix = NULL;
1063                                         sbi->ll_foreign_symlink_prefix_size = 0;
1064                                 }
1065                                 /* alloc for path length and '\0' */
1066                                 OBD_ALLOC(sbi->ll_foreign_symlink_prefix,
1067                                                 s2 - (s1 + prefix_pos) + 1);
1068                                 if (!sbi->ll_foreign_symlink_prefix) {
1069                                         /* restore previous */
1070                                         sbi->ll_foreign_symlink_prefix = old;
1071                                         sbi->ll_foreign_symlink_prefix_size =
1072                                                 old_len;
1073                                         RETURN(-ENOMEM);
1074                                 }
1075                                 if (old)
1076                                         OBD_FREE(old, old_len);
1077                                 strncpy(sbi->ll_foreign_symlink_prefix,
1078                                         s1 + prefix_pos,
1079                                         s2 - (s1 + prefix_pos));
1080                                 sbi->ll_foreign_symlink_prefix_size =
1081                                         s2 - (s1 + prefix_pos) + 1;
1082                         } else {
1083                                 LCONSOLE_ERROR_MSG(0x152,
1084                                                    "invalid %s option\n", s1);
1085                         }
1086                         /* enable foreign symlink support */
1087                         *flags |= tmp;
1088                         goto next;
1089                 }
1090                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
1091                                    s1);
1092                 RETURN(-EINVAL);
1093
1094 next:
1095                 /* Find next opt */
1096                 s2 = strchr(s1, ',');
1097                 if (s2 == NULL)
1098                         break;
1099                 s1 = s2 + 1;
1100         }
1101         RETURN(0);
1102 }
1103
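/**
 * Initialize the Lustre-specific part of a freshly allocated inode:
 * locks, open handle state, layout generation, and either statahead
 * fields (directories) or I/O, heat and PCC fields (other inodes).
 * lli_vfs_inode.i_mode must already be valid so the right branch is
 * taken.
 */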
1104 void ll_lli_init(struct ll_inode_info *lli)
1105 {
1106         lli->lli_inode_magic = LLI_INODE_MAGIC;
1107         lli->lli_flags = 0;
1108         spin_lock_init(&lli->lli_lock);
1109         lli->lli_posix_acl = NULL;
1110         /* Do not set lli_fid, it has been initialized already. */
1111         fid_zero(&lli->lli_pfid);
1112         lli->lli_mds_read_och = NULL;
1113         lli->lli_mds_write_och = NULL;
1114         lli->lli_mds_exec_och = NULL;
1115         lli->lli_open_fd_read_count = 0;
1116         lli->lli_open_fd_write_count = 0;
1117         lli->lli_open_fd_exec_count = 0;
1118         mutex_init(&lli->lli_och_mutex);
1119         spin_lock_init(&lli->lli_agl_lock);
1120         spin_lock_init(&lli->lli_layout_lock);
1121         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1122         lli->lli_clob = NULL;
1123
1124         init_rwsem(&lli->lli_xattrs_list_rwsem);
1125         mutex_init(&lli->lli_xattrs_enq_lock);
1126
1127         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1128         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1129                 lli->lli_opendir_key = NULL;
1130                 lli->lli_sai = NULL;
1131                 spin_lock_init(&lli->lli_sa_lock);
1132                 lli->lli_opendir_pid = 0;
1133                 lli->lli_sa_enabled = 0;
1134                 init_rwsem(&lli->lli_lsm_sem);
1135         } else {
1136                 mutex_init(&lli->lli_size_mutex);
1137                 mutex_init(&lli->lli_setattr_mutex);
1138                 lli->lli_symlink_name = NULL;
1139                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1140                 range_lock_tree_init(&lli->lli_write_tree);
1141                 init_rwsem(&lli->lli_glimpse_sem);
1142                 lli->lli_glimpse_time = ktime_set(0, 0);
1143                 INIT_LIST_HEAD(&lli->lli_agl_list);
1144                 lli->lli_agl_index = 0;
1145                 lli->lli_async_rc = 0;
1146                 spin_lock_init(&lli->lli_heat_lock);
1147                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1148                 lli->lli_heat_flags = 0;
1149                 mutex_init(&lli->lli_pcc_lock);
1150                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1151                 lli->lli_pcc_inode = NULL;
1152                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1153                 lli->lli_pcc_generation = 0;
1154                 mutex_init(&lli->lli_group_mutex);
1155                 lli->lli_group_users = 0;
1156                 lli->lli_group_gid = 0;
1157         }
1158         mutex_init(&lli->lli_layout_mutex);
1159         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1160 }
1161
1162 #define MAX_STRING_SIZE 128
1163
1164 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1165
1166 #define LSI_BDI_INITIALIZED     0x00400000
1167
1168 #ifndef HAVE_BDI_CAP_MAP_COPY
1169 # define BDI_CAP_MAP_COPY       0
1170 #endif
1171
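/*
 * Compatibility version for kernels without super_setup_bdi_name():
 * initialize and register the backing_dev_info embedded in
 * lustre_sb_info and point sb->s_bdi at it.
 */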
1172 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1173 {
1174         struct  lustre_sb_info *lsi = s2lsi(sb);
1175         char buf[MAX_STRING_SIZE];
1176         va_list args;
1177         int err;
1178
1179         err = bdi_init(&lsi->lsi_bdi);
1180         if (err)
1181                 return err;
1182
1183         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1184         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1185         lsi->lsi_bdi.name = "lustre";
1186         va_start(args, fmt);
1187         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1188         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1189         va_end(args);
1190
1191         if (!err)
1192                 sb->s_bdi = &lsi->lsi_bdi;
1193
1194         return err;
1195 }
1196 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1197
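/**
 * Fill in the client superblock at mount time: allocate the
 * ll_sb_info, parse client mount options, generate the client UUID,
 * register debugfs/sysfs entries, process the client config llog for
 * this profile and connect to the MDC and OSC devices through
 * client_common_fill_super().
 *
 * \retval 0            on success
 * \retval negative     negated errno on failure; the superblock is
 *                      cleaned up via ll_put_super()
 */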
1198 int ll_fill_super(struct super_block *sb)
1199 {
1200         struct  lustre_profile *lprof = NULL;
1201         struct  lustre_sb_info *lsi = s2lsi(sb);
1202         struct  ll_sb_info *sbi = NULL;
1203         char    *dt = NULL, *md = NULL;
1204         char    *profilenm = get_profile_name(sb);
1205         struct config_llog_instance *cfg;
1206         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1207         const int instlen = LUSTRE_MAXINSTANCE + 2;
1208         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1209         char name[MAX_STRING_SIZE];
1210         int md_len = 0;
1211         int dt_len = 0;
1212         uuid_t uuid;
1213         char *ptr;
1214         int len;
1215         int err;
1216
1217         ENTRY;
1218         /* for ASLR, to map between cfg_instance and hashed ptr */
1219         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1220                profilenm, cfg_instance, sb);
1221
1222         OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
1223
1224         OBD_ALLOC_PTR(cfg);
1225         if (cfg == NULL)
1226                 GOTO(out_free_cfg, err = -ENOMEM);
1227
1228         /* client additional sb info */
1229         lsi->lsi_llsbi = sbi = ll_init_sbi();
1230         if (IS_ERR(sbi))
1231                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1232
1233         err = ll_options(lsi->lsi_lmd->lmd_opts, sbi);
1234         if (err)
1235                 GOTO(out_free_cfg, err);
1236
1237         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1238         sb->s_d_op = &ll_d_ops;
1239
1240         /* UUID handling */
1241         generate_random_uuid(uuid.b);
1242         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1243
1244         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1245
1246         /* Get fsname */
1247         len = strlen(profilenm);
1248         ptr = strrchr(profilenm, '-');
1249         if (ptr && (strcmp(ptr, "-client") == 0))
1250                 len -= 7;
1251
1252         if (len > LUSTRE_MAXFSNAME) {
1253                 if (unlikely(len >= MAX_STRING_SIZE))
1254                         len = MAX_STRING_SIZE - 1;
1255                 strncpy(name, profilenm, len);
1256                 name[len] = '\0';
1257                 err = -ENAMETOOLONG;
1258                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1259                        name, LUSTRE_MAXFSNAME, err);
1260                 GOTO(out_free_cfg, err);
1261         }
1262         strncpy(sbi->ll_fsname, profilenm, len);
1263         sbi->ll_fsname[len] = '\0';
1264
1265         /* Mount info */
1266         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1267                  profilenm, cfg_instance);
1268
1269         err = super_setup_bdi_name(sb, "%s", name);
1270         if (err)
1271                 GOTO(out_free_cfg, err);
1272
1273         /* Call ll_debugfs_register_super() before lustre_process_log()
1274          * so that "llite.*.*" params can be processed correctly.
1275          */
1276         err = ll_debugfs_register_super(sb, name);
1277         if (err < 0) {
1278                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1279                        sbi->ll_fsname, err);
1280                 err = 0;
1281         }
1282
1283         /* The cfg_instance is a value unique to this super, in case some
1284          * joker tries to mount the same fs at two mount points.
1285          */
1286         cfg->cfg_instance = cfg_instance;
1287         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1288         cfg->cfg_callback = class_config_llog_handler;
1289         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1290         /* set up client obds */
1291         err = lustre_process_log(sb, profilenm, cfg);
1292         if (err < 0)
1293                 GOTO(out_debugfs, err);
1294
1295         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1296         lprof = class_get_profile(profilenm);
1297         if (lprof == NULL) {
1298                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1299                                    " read from the MGS.  Does that filesystem "
1300                                    "exist?\n", profilenm);
1301                 GOTO(out_debugfs, err = -EINVAL);
1302         }
1303         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1304                lprof->lp_md, lprof->lp_dt);
1305
1306         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1307         OBD_ALLOC(dt, dt_len);
1308         if (!dt)
1309                 GOTO(out_profile, err = -ENOMEM);
1310         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1311
1312         md_len = strlen(lprof->lp_md) + instlen + 2;
1313         OBD_ALLOC(md, md_len);
1314         if (!md)
1315                 GOTO(out_free_dt, err = -ENOMEM);
1316         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1317
1318         /* connections, registrations, sb setup */
1319         err = client_common_fill_super(sb, md, dt);
1320         if (err < 0)
1321                 GOTO(out_free_md, err);
1322
1323         sbi->ll_client_common_fill_super_succeeded = 1;
1324
1325 out_free_md:
1326         if (md)
1327                 OBD_FREE(md, md_len);
1328 out_free_dt:
1329         if (dt)
1330                 OBD_FREE(dt, dt_len);
1331 out_profile:
1332         if (lprof)
1333                 class_put_profile(lprof);
1334 out_debugfs:
1335         if (err < 0)
1336                 ll_debugfs_unregister_super(sb);
1337 out_free_cfg:
1338         if (cfg)
1339                 OBD_FREE_PTR(cfg);
1340
1341         if (err)
1342                 ll_put_super(sb);
1343         else if (sbi->ll_flags & LL_SBI_VERBOSE)
1344                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1345         RETURN(err);
1346 } /* ll_fill_super */
1347
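     /* Tear down the client mount: end the config and params llogs, wait
      * for unstable pages to be committed unless the MD obd is being forced
      * down, propagate obd_force to every device in this super's group,
      * then clean those devices up and free the sbi.
      */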
1348 void ll_put_super(struct super_block *sb)
1349 {
1350         struct config_llog_instance cfg, params_cfg;
1351         struct obd_device *obd;
1352         struct lustre_sb_info *lsi = s2lsi(sb);
1353         struct ll_sb_info *sbi = ll_s2sbi(sb);
1354         char *profilenm = get_profile_name(sb);
1355         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1356         long ccc_count;
1357         int next, force = 1, rc = 0;
1358         ENTRY;
1359
1360         if (IS_ERR(sbi))
1361                 GOTO(out_no_sbi, 0);
1362
1363         /* Should replace instance_id with something better for ASLR */
1364         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1365                profilenm, cfg_instance, sb);
1366
1367         cfg.cfg_instance = cfg_instance;
1368         lustre_end_log(sb, profilenm, &cfg);
1369
1370         params_cfg.cfg_instance = cfg_instance;
1371         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1372
1373         if (sbi->ll_md_exp) {
1374                 obd = class_exp2obd(sbi->ll_md_exp);
1375                 if (obd)
1376                         force = obd->obd_force;
1377         }
1378
1379         /* Wait for unstable pages to be committed to stable storage */
1380         if (force == 0) {
1381                 rc = l_wait_event_abortable(
1382                         sbi->ll_cache->ccc_unstable_waitq,
1383                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1384         }
1385
1386         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1387         if (force == 0 && rc != -ERESTARTSYS)
1388                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1389
1390         /* We need to set force before the lov_disconnect in
1391          * lustre_common_put_super, since lov_disconnect cleans up
1392          * the OSCs as well. */
1393         if (force) {
1394                 next = 0;
1395                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1396                                                      &next)) != NULL) {
1397                         obd->obd_force = force;
1398                 }
1399         }
1400
1401         if (sbi->ll_client_common_fill_super_succeeded) {
1402                 /* Only if client_common_fill_super succeeded */
1403                 client_common_put_super(sb);
1404         }
1405
1406         next = 0;
1407         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1408                 class_manual_cleanup(obd);
1409
1410         if (sbi->ll_flags & LL_SBI_VERBOSE)
1411                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1412
1413         if (profilenm)
1414                 class_del_profile(profilenm);
1415
1416 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1417         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1418                 bdi_destroy(&lsi->lsi_bdi);
1419                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1420         }
1421 #endif
1422
1423         ll_free_sbi(sb);
1424         lsi->lsi_llsbi = NULL;
1425 out_no_sbi:
1426         lustre_common_put_super(sb);
1427
1428         cl_env_cache_purge(~0);
1429
1430         EXIT;
1431 } /* ll_put_super */
1432
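     /* Return the VFS inode cached in the lock's resource LVB.  If the
      * inode's magic is still valid, take a reference with igrab();
      * otherwise (e.g. the inode is being freed) log the bogus pointer
      * and return NULL.
      */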
1433 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1434 {
1435         struct inode *inode = NULL;
1436
1437         /* NOTE: we depend on atomic igrab() -bzzz */
1438         lock_res_and_lock(lock);
1439         if (lock->l_resource->lr_lvb_inode) {
1440                 struct ll_inode_info * lli;
1441                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1442                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1443                         inode = igrab(lock->l_resource->lr_lvb_inode);
1444                 } else {
1445                         inode = lock->l_resource->lr_lvb_inode;
1446                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1447                                          D_WARNING, lock, "lr_lvb_inode %p is "
1448                                          "bogus: magic %08x",
1449                                          lock->l_resource->lr_lvb_inode,
1450                                          lli->lli_inode_magic);
1451                         inode = NULL;
1452                 }
1453         }
1454         unlock_res_and_lock(lock);
1455         return inode;
1456 }
1457
1458 void ll_dir_clear_lsm_md(struct inode *inode)
1459 {
1460         struct ll_inode_info *lli = ll_i2info(inode);
1461
1462         LASSERT(S_ISDIR(inode->i_mode));
1463
1464         if (lli->lli_lsm_md) {
1465                 lmv_free_memmd(lli->lli_lsm_md);
1466                 lli->lli_lsm_md = NULL;
1467         }
1468
1469         if (lli->lli_default_lsm_md) {
1470                 lmv_free_memmd(lli->lli_default_lsm_md);
1471                 lli->lli_default_lsm_md = NULL;
1472         }
1473 }
1474
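     /* Find or create the anonymous directory inode backing one stripe of a
      * striped directory: build the ino from the stripe @fid and, for a new
      * inode, set up the dir operations, zero the timestamps and record the
      * master object FID from @md->body as lli_pfid.
      */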
1475 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1476                                       const struct lu_fid *fid,
1477                                       struct lustre_md *md)
1478 {
1479         struct ll_sb_info       *sbi = ll_s2sbi(sb);
1480         struct mdt_body         *body = md->body;
1481         struct inode            *inode;
1482         ino_t                   ino;
1483         ENTRY;
1484
1485         ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1486         inode = iget_locked(sb, ino);
1487         if (inode == NULL) {
1488                 CERROR("%s: failed to get simple inode "DFID": rc = -ENOENT\n",
1489                        sbi->ll_fsname, PFID(fid));
1490                 RETURN(ERR_PTR(-ENOENT));
1491         }
1492
1493         if (inode->i_state & I_NEW) {
1494                 struct ll_inode_info *lli = ll_i2info(inode);
1495                 struct lmv_stripe_md *lsm = md->lmv;
1496
1497                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1498                                 (body->mbo_mode & S_IFMT);
1499                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1500                          PFID(fid));
1501
1502                 inode->i_mtime.tv_sec = 0;
1503                 inode->i_atime.tv_sec = 0;
1504                 inode->i_ctime.tv_sec = 0;
1505                 inode->i_rdev = 0;
1506
1507 #ifdef HAVE_BACKING_DEV_INFO
1508                 /* initializing backing dev info. */
1509                 inode->i_mapping->backing_dev_info =
1510                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1511 #endif
1512                 inode->i_op = &ll_dir_inode_operations;
1513                 inode->i_fop = &ll_dir_operations;
1514                 lli->lli_fid = *fid;
1515                 ll_lli_init(lli);
1516
1517                 LASSERT(lsm != NULL);
1518                 /* master object FID */
1519                 lli->lli_pfid = body->mbo_fid1;
1520                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1521                        lli, PFID(fid), PFID(&lli->lli_pfid));
1522                 unlock_new_inode(inode);
1523         }
1524
1525         RETURN(inode);
1526 }
1527
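     /* Attach the directory layout from @md to @inode: for a striped dir,
      * instantiate an anonymous inode for every sane stripe FID and store
      * it in lmo_root (unwinding the igets on failure); finally publish
      * the lsm in lli_lsm_md.  Called with lli_lsm_sem held for write.
      */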
1528 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1529 {
1530         struct lu_fid *fid;
1531         struct lmv_stripe_md *lsm = md->lmv;
1532         struct ll_inode_info *lli = ll_i2info(inode);
1533         int i;
1534
1535         LASSERT(lsm != NULL);
1536
1537         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1538                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1539         lsm_md_dump(D_INODE, lsm);
1540
1541         if (!lmv_dir_striped(lsm))
1542                 goto out;
1543
1544         /* XXX sigh, this lsm_root initialization should be in the
1545          * LMV layer, but it needs ll_iget, so we put it here
1546          * for now. */
1547         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1548                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1549                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1550
1551                 if (!fid_is_sane(fid))
1552                         continue;
1553
1554                 /* Unfortunately ll_iget will call ll_update_inode,
1555                  * where the initialization of a slave inode is slightly
1556                  * different, so it resets lsm_md to NULL to avoid
1557                  * initializing the lsm for a slave inode. */
1558                 lsm->lsm_md_oinfo[i].lmo_root =
1559                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1560                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1561                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1562
1563                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1564                         while (i-- > 0) {
1565                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1566                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1567                         }
1568                         return rc;
1569                 }
1570         }
1571 out:
1572         lli->lli_lsm_md = lsm;
1573
1574         return 0;
1575 }
1576
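     /* Update the cached default LMV (default dir striping) of @inode from
      * @md.  If the reply carries no default LMV, drop the cached one; if
      * it is unchanged, do nothing; otherwise free the old copy and take
      * ownership of md->default_lmv, clearing it in @md so that the later
      * lustre_md cleanup does not free it again.
      */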
1577 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1578 {
1579         struct ll_inode_info *lli = ll_i2info(inode);
1580
1581         if (!md->default_lmv) {
1582                 /* clear default lsm */
1583                 if (lli->lli_default_lsm_md) {
1584                         down_write(&lli->lli_lsm_sem);
1585                         if (lli->lli_default_lsm_md) {
1586                                 lmv_free_memmd(lli->lli_default_lsm_md);
1587                                 lli->lli_default_lsm_md = NULL;
1588                         }
1589                         up_write(&lli->lli_lsm_sem);
1590                 }
1591                 return;
1592         }
1593
1594         if (lli->lli_default_lsm_md) {
1595                 /* do nothing if the default lsm isn't changed */
1596                 down_read(&lli->lli_lsm_sem);
1597                 if (lli->lli_default_lsm_md &&
1598                     lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1599                         up_read(&lli->lli_lsm_sem);
1600                         return;
1601                 }
1602                 up_read(&lli->lli_lsm_sem);
1603         }
1604
1605         down_write(&lli->lli_lsm_sem);
1606         if (lli->lli_default_lsm_md)
1607                 lmv_free_memmd(lli->lli_default_lsm_md);
1608         lli->lli_default_lsm_md = md->default_lmv;
1609         lsm_md_dump(D_INODE, md->default_lmv);
1610         md->default_lmv = NULL;
1611         up_write(&lli->lli_lsm_sem);
1612 }
1613
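     /* Update the striping of directory @inode from @md: refresh the
      * default LMV, and when the reply carries a stripe EA that differs
      * from the cached one, check that the layout version increased,
      * rebuild lli_lsm_md via ll_init_lsm_md(), and merge the stripes'
      * attributes back into the mdt_body with md_merge_attr().
      */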
1614 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1615 {
1616         struct ll_inode_info *lli = ll_i2info(inode);
1617         struct lmv_stripe_md *lsm = md->lmv;
1618         struct cl_attr  *attr;
1619         int rc = 0;
1620
1621         ENTRY;
1622
1623         LASSERT(S_ISDIR(inode->i_mode));
1624         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1625                PFID(ll_inode2fid(inode)));
1626
1627         /* update default LMV */
1628         if (md->default_lmv)
1629                 ll_update_default_lsm_md(inode, md);
1630
1631         /*
1632          * no striping information from the request: the lustre_md from the
1633          * req does not include the stripe EA, see ll_md_setattr()
1634          */
1635         if (!lsm)
1636                 RETURN(0);
1637
1638         /*
1639          * normally the dir layout doesn't change, so take only the read
1640          * lock to check that, to avoid blocking other MD operations.
1641          */
1642         down_read(&lli->lli_lsm_sem);
1643
1644         /* a concurrent lookup already initialized the lsm, and it is unchanged */
1645         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1646                 GOTO(unlock, rc = 0);
1647
1648         /* if the dir layout doesn't match, check whether the version has
1649          * increased, which means the layout changed; this happens in dir
1650          * split/merge and lfsck.
1651          *
1652          * A foreign LMV should not change.
1653          */
1654         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1655             lsm->lsm_md_layout_version <=
1656             lli->lli_lsm_md->lsm_md_layout_version) {
1657                 CERROR("%s: "DFID" dir layout mismatch:\n",
1658                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1659                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1660                 lsm_md_dump(D_ERROR, lsm);
1661                 GOTO(unlock, rc = -EINVAL);
1662         }
1663
1664         up_read(&lli->lli_lsm_sem);
1665         down_write(&lli->lli_lsm_sem);
1666         /* clear existing lsm */
1667         if (lli->lli_lsm_md) {
1668                 lmv_free_memmd(lli->lli_lsm_md);
1669                 lli->lli_lsm_md = NULL;
1670         }
1671
1672         rc = ll_init_lsm_md(inode, md);
1673         up_write(&lli->lli_lsm_sem);
1674
1675         if (rc)
1676                 RETURN(rc);
1677
1678         /* set md->lmv to NULL, so that the following freeing of lustre_md
1679          * will not free this lsm.
1680          */
1681         md->lmv = NULL;
1682
1683         /* md_merge_attr() may take a long time; since the lsm is already
1684          * set, switch to a read lock.
1685          */
1686         down_read(&lli->lli_lsm_sem);
1687
1688         if (!lmv_dir_striped(lli->lli_lsm_md))
1689                 GOTO(unlock, rc = 0);
1690
1691         OBD_ALLOC_PTR(attr);
1692         if (!attr)
1693                 GOTO(unlock, rc = -ENOMEM);
1694
1695         /* validate the lsm */
1696         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1697                            ll_md_blocking_ast);
1698         if (!rc) {
1699                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1700                         md->body->mbo_nlink = attr->cat_nlink;
1701                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1702                         md->body->mbo_size = attr->cat_size;
1703                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1704                         md->body->mbo_atime = attr->cat_atime;
1705                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1706                         md->body->mbo_ctime = attr->cat_ctime;
1707                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1708                         md->body->mbo_mtime = attr->cat_mtime;
1709         }
1710
1711         OBD_FREE_PTR(attr);
1712         GOTO(unlock, rc);
1713 unlock:
1714         up_read(&lli->lli_lsm_sem);
1715
1716         return rc;
1717 }
1718
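     /* Per-inode cleanup when the VFS drops the inode: close any cached MDS
      * open handles, free the symlink name and xattr cache, drop cached
      * ACLs, release the directory striping, and finish the cl_object
      * before the inode goes away.
      */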
1719 void ll_clear_inode(struct inode *inode)
1720 {
1721         struct ll_inode_info *lli = ll_i2info(inode);
1722         struct ll_sb_info *sbi = ll_i2sbi(inode);
1723
1724         ENTRY;
1725
1726         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1727                PFID(ll_inode2fid(inode)), inode);
1728
1729         if (S_ISDIR(inode->i_mode)) {
1730                 /* these should have been cleared in ll_file_release */
1731                 LASSERT(lli->lli_opendir_key == NULL);
1732                 LASSERT(lli->lli_sai == NULL);
1733                 LASSERT(lli->lli_opendir_pid == 0);
1734         } else {
1735                 pcc_inode_free(inode);
1736         }
1737
1738         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1739
1740         LASSERT(!lli->lli_open_fd_write_count);
1741         LASSERT(!lli->lli_open_fd_read_count);
1742         LASSERT(!lli->lli_open_fd_exec_count);
1743
1744         if (lli->lli_mds_write_och)
1745                 ll_md_real_close(inode, FMODE_WRITE);
1746         if (lli->lli_mds_exec_och)
1747                 ll_md_real_close(inode, FMODE_EXEC);
1748         if (lli->lli_mds_read_och)
1749                 ll_md_real_close(inode, FMODE_READ);
1750
1751         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1752                 OBD_FREE(lli->lli_symlink_name,
1753                          strlen(lli->lli_symlink_name) + 1);
1754                 lli->lli_symlink_name = NULL;
1755         }
1756
1757         ll_xattr_cache_destroy(inode);
1758
1759         forget_all_cached_acls(inode);
1760         lli_clear_acl(lli);
1761         lli->lli_inode_magic = LLI_INODE_DEAD;
1762
1763         if (S_ISDIR(inode->i_mode))
1764                 ll_dir_clear_lsm_md(inode);
1765         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1766                 LASSERT(list_empty(&lli->lli_agl_list));
1767
1768         /*
1769          * XXX This has to be done before lsm is freed below, because
1770          * cl_object still uses inode lsm.
1771          */
1772         cl_inode_fini(inode);
1773
1774         llcrypt_put_encryption_info(inode);
1775
1776         EXIT;
1777 }
1778
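     /* Send a setattr RPC to the MDS and apply the reply to the local
      * inode: on -ENOENT mark the inode unlinked; on success update the
      * in-core attributes with simple_setattr() (size and the *TIME_SET
      * flags are left for the OST path) and then call ll_update_inode().
      */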
1779 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1780 {
1781         struct lustre_md md;
1782         struct inode *inode = dentry->d_inode;
1783         struct ll_sb_info *sbi = ll_i2sbi(inode);
1784         struct ptlrpc_request *request = NULL;
1785         int rc, ia_valid;
1786         ENTRY;
1787
1788         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1789                                      LUSTRE_OPC_ANY, NULL);
1790         if (IS_ERR(op_data))
1791                 RETURN(PTR_ERR(op_data));
1792
1793         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1794         if (rc) {
1795                 ptlrpc_req_finished(request);
1796                 if (rc == -ENOENT) {
1797                         clear_nlink(inode);
1798                         /* Unlinked special device node? Or just a race?
1799                          * Pretend we have done everything. */
1800                         if (!S_ISREG(inode->i_mode) &&
1801                             !S_ISDIR(inode->i_mode)) {
1802                                 ia_valid = op_data->op_attr.ia_valid;
1803                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1804                                 rc = simple_setattr(dentry, &op_data->op_attr);
1805                                 op_data->op_attr.ia_valid = ia_valid;
1806                         }
1807                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1808                         CERROR("md_setattr fails: rc = %d\n", rc);
1809                 }
1810                 RETURN(rc);
1811         }
1812
1813         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1814                               sbi->ll_md_exp, &md);
1815         if (rc) {
1816                 ptlrpc_req_finished(request);
1817                 RETURN(rc);
1818         }
1819
1820         ia_valid = op_data->op_attr.ia_valid;
1821         /* inode size will be set in ll_setattr_ost; can't do it now since
1822          * the dirty cache is not cleared yet. */
1823         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1824         if (S_ISREG(inode->i_mode))
1825                 inode_lock(inode);
1826         rc = simple_setattr(dentry, &op_data->op_attr);
1827         if (S_ISREG(inode->i_mode))
1828                 inode_unlock(inode);
1829         op_data->op_attr.ia_valid = ia_valid;
1830
1831         rc = ll_update_inode(inode, &md);
1832         ptlrpc_req_finished(request);
1833
1834         RETURN(rc);
1835 }
1836
1837 /**
1838  * Zero portion of page that is part of @inode.
1839  * This implies, if necessary:
1840  * - taking cl_lock on range corresponding to concerned page
1841  * - grabbing vm page
1842  * - associating cl_page
1843  * - proceeding to clio read
1844  * - zeroing range in page
1845  * - proceeding to cl_page flush
1846  * - releasing cl_lock
1847  *
1848  * \param[in] inode     inode
1849  * \param[in] index     page index
1850  * \param[in] offset    offset in page to start zero from
1851  * \param[in] len       len to zero
1852  *
1853  * \retval 0            on success
1854  * \retval negative     errno on failure
1855  */
1856 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1857                     unsigned len)
1858 {
1859         struct ll_inode_info *lli = ll_i2info(inode);
1860         struct cl_object *clob = lli->lli_clob;
1861         __u16 refcheck;
1862         struct lu_env *env = NULL;
1863         struct cl_io *io = NULL;
1864         struct cl_page *clpage = NULL;
1865         struct page *vmpage = NULL;
1866         unsigned from = index << PAGE_SHIFT;
1867         struct cl_lock *lock = NULL;
1868         struct cl_lock_descr *descr = NULL;
1869         struct cl_2queue *queue = NULL;
1870         struct cl_sync_io *anchor = NULL;
1871         bool holdinglock = false;
1872         bool lockedbymyself = true;
1873         int rc;
1874
1875         ENTRY;
1876
1877         env = cl_env_get(&refcheck);
1878         if (IS_ERR(env))
1879                 RETURN(PTR_ERR(env));
1880
1881         io = vvp_env_thread_io(env);
1882         io->ci_obj = clob;
1883         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1884         if (rc)
1885                 GOTO(putenv, rc);
1886
1887         lock = vvp_env_lock(env);
1888         descr = &lock->cll_descr;
1889         descr->cld_obj   = io->ci_obj;
1890         descr->cld_start = cl_index(io->ci_obj, from);
1891         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1892         descr->cld_mode  = CLM_WRITE;
1893         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1894
1895         /* request lock for page */
1896         rc = cl_lock_request(env, io, lock);
1897         /* -ECANCELED indicates a matching lock with a different extent
1898          * was already present, and -EEXIST indicates a matching lock
1899          * on exactly the same extent was already present.
1900          * In both cases it means we are covered.
1901          */
1902         if (rc == -ECANCELED || rc == -EEXIST)
1903                 rc = 0;
1904         else if (rc < 0)
1905                 GOTO(iofini, rc);
1906         else
1907                 holdinglock = true;
1908
1909         /* grab page */
1910         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1911         if (vmpage == NULL)
1912                 GOTO(rellock, rc = -EOPNOTSUPP);
1913
1914         if (!PageDirty(vmpage)) {
1915                 /* associate cl_page */
1916                 clpage = cl_page_find(env, clob, vmpage->index,
1917                                       vmpage, CPT_CACHEABLE);
1918                 if (IS_ERR(clpage))
1919                         GOTO(pagefini, rc = PTR_ERR(clpage));
1920
1921                 cl_page_assume(env, io, clpage);
1922         }
1923
1924         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1925             !PageWriteback(vmpage)) {
1926                 /* read page */
1927                 /* set PagePrivate2 to detect special case of empty page
1928                  * in osc_brw_fini_request()
1929                  */
1930                 SetPagePrivate2(vmpage);
1931                 rc = ll_io_read_page(env, io, clpage, NULL);
1932                 if (!PagePrivate2(vmpage))
1933                         /* PagePrivate2 was cleared in osc_brw_fini_request()
1934                          * meaning we read an empty page. In this case, in order
1935                          * to avoid allocating unnecessary block in truncated
1936                          * file, we must not zero and write as below. Subsequent
1937                          * server-side truncate will handle things correctly.
1938                          */
1939                         GOTO(clpfini, rc = 0);
1940                 ClearPagePrivate2(vmpage);
1941                 if (rc)
1942                         GOTO(clpfini, rc);
1943                 lockedbymyself = trylock_page(vmpage);
1944                 cl_page_assume(env, io, clpage);
1945         }
1946
1947         /* zero range in page */
1948         zero_user(vmpage, offset, len);
1949
1950         if (holdinglock && clpage) {
1951                 /* explicitly write newly modified page */
1952                 queue = &io->ci_queue;
1953                 cl_2queue_init(queue);
1954                 anchor = &vvp_env_info(env)->vti_anchor;
1955                 cl_sync_io_init(anchor, 1);
1956                 clpage->cp_sync_io = anchor;
1957                 cl_2queue_add(queue, clpage);
1958                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1959                 if (rc)
1960                         GOTO(queuefini1, rc);
1961                 rc = cl_sync_io_wait(env, anchor, 0);
1962                 if (rc)
1963                         GOTO(queuefini2, rc);
1964                 cl_page_assume(env, io, clpage);
1965
1966 queuefini2:
1967                 cl_2queue_discard(env, io, queue);
1968 queuefini1:
1969                 cl_2queue_disown(env, io, queue);
1970                 cl_2queue_fini(env, queue);
1971         }
1972
1973 clpfini:
1974         if (clpage)
1975                 cl_page_put(env, clpage);
1976 pagefini:
1977         if (lockedbymyself) {
1978                 unlock_page(vmpage);
1979                 put_page(vmpage);
1980         }
1981 rellock:
1982         if (holdinglock)
1983                 cl_lock_release(env, lock);
1984 iofini:
1985         cl_io_fini(env, io);
1986 putenv:
1987         if (env)
1988                 cl_env_put(env, &refcheck);
1989
1990         RETURN(rc);
1991 }
1992
1993 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1994  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1995  * keep these values until such a time that objects are allocated for it.
1996  * We do the MDS operations first, as it is checking permissions for us.
1997  * We don't do the MDS RPC if there is nothing that we want to store there,
1998  * otherwise there is no harm in updating mtime/atime on the MDS if we are
1999  * going to do an RPC anyway.
2000  *
2001  * If we are doing a truncate, we will send the mtime and ctime updates
2002  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2003  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2004  * at the same time.
2005  *
2006  * In case of HSM import, we only set attributes on the MDS.
2007  */
2008 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2009                    enum op_xvalid xvalid, bool hsm_import)
2010 {
2011         struct inode *inode = dentry->d_inode;
2012         struct ll_inode_info *lli = ll_i2info(inode);
2013         struct md_op_data *op_data = NULL;
2014         ktime_t kstart = ktime_get();
2015         int rc = 0;
2016
2017         ENTRY;
2018
2019         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2020                "valid %x, hsm_import %d\n",
2021                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2022                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2023                hsm_import);
2024
2025         if (attr->ia_valid & ATTR_SIZE) {
2026                 /* Check new size against VFS/VM file size limit and rlimit */
2027                 rc = inode_newsize_ok(inode, attr->ia_size);
2028                 if (rc)
2029                         RETURN(rc);
2030
2031                 /* The maximum Lustre file size is variable, based on the
2032                  * OST maximum object size and number of stripes.  This
2033                  * needs another check in addition to the VFS check above. */
2034                 if (attr->ia_size > ll_file_maxbytes(inode)) {
2035                         CDEBUG(D_INODE,"file "DFID" too large %llu > %llu\n",
2036                                PFID(&lli->lli_fid), attr->ia_size,
2037                                ll_file_maxbytes(inode));
2038                         RETURN(-EFBIG);
2039                 }
2040
2041                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2042         }
2043
2044         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
2045         if (attr->ia_valid & TIMES_SET_FLAGS) {
2046                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2047                     !capable(CAP_FOWNER))
2048                         RETURN(-EPERM);
2049         }
2050
2051         /* We mark all of the fields "set" so MDS/OST does not re-set them */
2052         if (!(xvalid & OP_XVALID_CTIME_SET) &&
2053              (attr->ia_valid & ATTR_CTIME)) {
2054                 attr->ia_ctime = current_time(inode);
2055                 xvalid |= OP_XVALID_CTIME_SET;
2056         }
2057         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2058             (attr->ia_valid & ATTR_ATIME)) {
2059                 attr->ia_atime = current_time(inode);
2060                 attr->ia_valid |= ATTR_ATIME_SET;
2061         }
2062         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2063             (attr->ia_valid & ATTR_MTIME)) {
2064                 attr->ia_mtime = current_time(inode);
2065                 attr->ia_valid |= ATTR_MTIME_SET;
2066         }
2067
2068         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2069                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2070                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2071                        ktime_get_real_seconds());
2072
2073         if (S_ISREG(inode->i_mode))
2074                 inode_unlock(inode);
2075
2076         /* We always do an MDS RPC, even if we're only changing the size;
2077          * only the MDS knows whether truncate() should fail with -ETXTBSY */
2078
2079         OBD_ALLOC_PTR(op_data);
2080         if (op_data == NULL)
2081                 GOTO(out, rc = -ENOMEM);
2082
2083         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2084                 /* If we are changing file size, file content is
2085                  * modified, flag it.
2086                  */
2087                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2088                 op_data->op_bias |= MDS_DATA_MODIFIED;
2089                 clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
2090         }
2091
2092         if (attr->ia_valid & ATTR_FILE) {
2093                 struct ll_file_data *fd = attr->ia_file->private_data;
2094
2095                 if (fd->fd_lease_och)
2096                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2097         }
2098
2099         op_data->op_attr = *attr;
2100         op_data->op_xvalid = xvalid;
2101
2102         rc = ll_md_setattr(dentry, op_data);
2103         if (rc)
2104                 GOTO(out, rc);
2105
2106         if (!S_ISREG(inode->i_mode) || hsm_import)
2107                 GOTO(out, rc = 0);
2108
2109         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2110                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2111             xvalid & OP_XVALID_CTIME_SET) {
2112                 bool cached = false;
2113
2114                 rc = pcc_inode_setattr(inode, attr, &cached);
2115                 if (cached) {
2116                         if (rc) {
2117                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2118                                        "rc = %d\n",
2119                                        ll_i2sbi(inode)->ll_fsname,
2120                                        PFID(&lli->lli_fid), rc);
2121                                 GOTO(out, rc);
2122                         }
2123                 } else {
2124                         unsigned int flags = 0;
2125
2126                         /* For truncate and utimes sending attributes to OSTs,
2127                          * setting mtime/atime to the past will be performed
2128                          * under PW [0:EOF] extent lock (new_size:EOF for
2129                          * truncate). It may seem excessive to send mtime/atime
2130                          * updates to OSTs when not setting times to past, but
2131                          * it is necessary due to possible time
2132                          * de-synchronization between MDT inode and OST objects
2133                          */
2134                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2135                             attr->ia_valid & ATTR_SIZE) {
2136                                 xvalid |= OP_XVALID_FLAGS;
2137                                 flags = LUSTRE_ENCRYPT_FL;
2138                                 /* Calling ll_io_zero_page is not necessary if
2139                                  * truncating on a PAGE_SIZE boundary, because
2140                                  * whole pages will be wiped.
2141                                  * In case of Direct IO, all we need is to set
2142                                  * the new size.
2143                                  */
2144                                 if (attr->ia_size & ~PAGE_MASK &&
2145                                     !(attr->ia_valid & ATTR_FILE &&
2146                                       attr->ia_file->f_flags & O_DIRECT)) {
2147                                         pgoff_t offset =
2148                                                 attr->ia_size & (PAGE_SIZE - 1);
2149
2150                                         rc = ll_io_zero_page(inode,
2151                                                     attr->ia_size >> PAGE_SHIFT,
2152                                                     offset, PAGE_SIZE - offset);
2153                                         if (rc)
2154                                                 GOTO(out, rc);
2155                                 }
2156                         }
2157                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2158                 }
2159         }
2160
2161         /* If the file was restored, we need to set the dirty flag.
2162          *
2163          * We've already sent MDS_DATA_MODIFIED flag in
2164          * ll_md_setattr() for truncate. However, the MDT refuses to
2165          * set the HS_DIRTY flag on released files, so we have to set
2166          * it again if the file has been restored. Please check how
2167          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2168          *
2169          * Please notice that if the file is not released, the previous
2170          * MDS_DATA_MODIFIED has taken effect and usually
2171          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2172          * This way we can save an RPC for the common open + truncate
2173          * operation. */
2174         if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
2175                 struct hsm_state_set hss = {
2176                         .hss_valid = HSS_SETMASK,
2177                         .hss_setmask = HS_DIRTY,
2178                 };
2179                 int rc2;
2180
2181                 rc2 = ll_hsm_state_set(inode, &hss);
2182                 /* truncate and write can happen at the same time, so the
2183                  * file can be marked modified even though it was not
2184                  * restored from the released state; then ll_hsm_state_set()
2185                  * is not applicable for the file and rc2 < 0 is normal in
2186                  * this case. */
2187                 if (rc2 < 0)
2188                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2189                                PFID(ll_inode2fid(inode)), rc2);
2190         }
2191
2192         EXIT;
2193 out:
2194         if (op_data != NULL)
2195                 ll_finish_md_op_data(op_data);
2196
2197         if (S_ISREG(inode->i_mode)) {
2198                 inode_lock(inode);
2199                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2200                         inode_dio_wait(inode);
2201                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2202                  * flag.  ll_update_inode (called from ll_md_setattr) clears
2203                  * inode flags, so there is a gap where S_NOSEC is not set.
2204                  * This can cause a writer to take the i_mutex unnecessarily,
2205                  * but this is safe to do and should be rare. */
2206                 inode_has_no_xattr(inode);
2207         }
2208
2209         if (!rc)
2210                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2211                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2212                                    ktime_us_delta(ktime_get(), kstart));
2213
2214         return rc;
2215 }
2216
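     /* VFS ->setattr entry point: let llcrypt veto the change, then derive
      * OP_XVALID_OWNEROVERRIDE, ATTR_FORCE and the ATTR_KILL_S[UG]ID bits
      * from the requested mode/size changes before handing everything off
      * to ll_setattr_raw().
      */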
2217 int ll_setattr(struct dentry *de, struct iattr *attr)
2218 {
2219         int mode = de->d_inode->i_mode;
2220         enum op_xvalid xvalid = 0;
2221         int rc;
2222
2223         rc = llcrypt_prepare_setattr(de, attr);
2224         if (rc)
2225                 return rc;
2226
2227         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2228                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2229                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2230
2231         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2232                                (ATTR_SIZE|ATTR_MODE)) &&
2233             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2234              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2235               !(attr->ia_mode & S_ISGID))))
2236                 attr->ia_valid |= ATTR_FORCE;
2237
2238         if ((attr->ia_valid & ATTR_MODE) &&
2239             (mode & S_ISUID) &&
2240             !(attr->ia_mode & S_ISUID) &&
2241             !(attr->ia_valid & ATTR_KILL_SUID))
2242                 attr->ia_valid |= ATTR_KILL_SUID;
2243
2244         if ((attr->ia_valid & ATTR_MODE) &&
2245             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2246             !(attr->ia_mode & S_ISGID) &&
2247             !(attr->ia_valid & ATTR_KILL_SGID))
2248                 attr->ia_valid |= ATTR_KILL_SGID;
2249
2250         return ll_setattr_raw(de, attr, xvalid, false);
2251 }
2252
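     /* Fill @osfs for this client: statfs the MDT first and, unless it
      * already returned summed values (OS_STATFS_SUM), overlay the block
      * counts reported by the OSTs and scale the inode counts so that
      * "inodes in use" stays correct.
      */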
2253 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2254                        u32 flags)
2255 {
2256         struct obd_statfs obd_osfs = { 0 };
2257         time64_t max_age;
2258         int rc;
2259
2260         ENTRY;
2261         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2262
2263         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2264                 flags |= OBD_STATFS_NODELAY;
2265
2266         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2267         if (rc)
2268                 RETURN(rc);
2269
2270         osfs->os_type = LL_SUPER_MAGIC;
2271
2272         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2273               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2274
2275         if (osfs->os_state & OS_STATFS_SUM)
2276                 GOTO(out, rc);
2277
2278         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2279         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2280                 GOTO(out, rc = 0);
2281
2282         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2283                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2284                obd_osfs.os_files);
2285
2286         osfs->os_bsize = obd_osfs.os_bsize;
2287         osfs->os_blocks = obd_osfs.os_blocks;
2288         osfs->os_bfree = obd_osfs.os_bfree;
2289         osfs->os_bavail = obd_osfs.os_bavail;
2290
2291         /* If we have _some_ OSTs, but don't have as many free objects on the
2292          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2293          * to compensate, so that the "inodes in use" number is correct.
2294          * This should be kept in sync with lod_statfs() behaviour.
2295          */
2296         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2297                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2298                                  obd_osfs.os_ffree;
2299                 osfs->os_ffree = obd_osfs.os_ffree;
2300         }
2301
2302 out:
2303         RETURN(rc);
2304 }
2305
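     /* Clamp the statfs result to the project quota of @inode's project ID,
      * so space and inode counts reflect the quota rather than the whole
      * filesystem.  The block limit is converted from 1 KiB units (hence
      * the * 1024) to f_bsize units, e.g. dqb_bsoftlimit = 1048576 with
      * f_bsize = 4096 gives 1048576 * 1024 / 4096 = 262144 blocks.
      */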
2306 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2307 {
2308         struct if_quotactl qctl = {
2309                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2310                 .qc_type = PRJQUOTA,
2311                 .qc_valid = QC_GENERAL,
2312         };
2313         u64 limit, curblock;
2314         int ret;
2315
2316         qctl.qc_id = ll_i2info(inode)->lli_projid;
2317         ret = quotactl_ioctl(ll_i2sbi(inode), &qctl);
2318         if (ret) {
2319                 /* ignore errors if the project ID does not have
2320                  * a quota limit or the feature is unsupported.
2321                  */
2322                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2323                         ret = 0;
2324                 return ret;
2325         }
2326
2327         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2328                  qctl.qc_dqblk.dqb_bsoftlimit :
2329                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2330         if (limit && sfs->f_blocks > limit) {
2331                 curblock = (qctl.qc_dqblk.dqb_curspace +
2332                                 sfs->f_bsize - 1) / sfs->f_bsize;
2333                 sfs->f_blocks = limit;
2334                 sfs->f_bfree = sfs->f_bavail =
2335                         (sfs->f_blocks > curblock) ?
2336                         (sfs->f_blocks - curblock) : 0;
2337         }
2338
2339         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2340                 qctl.qc_dqblk.dqb_isoftlimit :
2341                 qctl.qc_dqblk.dqb_ihardlimit;
2342         if (limit && sfs->f_files > limit) {
2343                 sfs->f_files = limit;
2344                 sfs->f_ffree = (sfs->f_files >
2345                         qctl.qc_dqblk.dqb_curinodes) ?
2346                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2347         }
2348
2349         return 0;
2350 }
2351
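     /* VFS ->statfs: fetch summed cluster usage via ll_statfs_internal(),
      * downshift the block counts on 32-bit kernels to avoid overflow,
      * encode the fsid from s_dev, and apply project quota clamping when
      * the inode carries a project ID.
      */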
2352 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2353 {
2354         struct super_block *sb = de->d_sb;
2355         struct obd_statfs osfs;
2356         __u64 fsid = huge_encode_dev(sb->s_dev);
2357         ktime_t kstart = ktime_get();
2358         int rc;
2359
2360         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2361
2362         /* Some amount of caching on the client is allowed */
2363         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2364         if (rc)
2365                 return rc;
2366
2367         statfs_unpack(sfs, &osfs);
2368
2369         /* We need to downshift for all 32-bit kernels, because we can't
2370          * tell if the kernel is being called via sys_statfs64() or not.
2371          * Stop before overflowing f_bsize - past that point it is better to
2372          * just risk EOVERFLOW if the caller is using the old sys_statfs(). */
2373         if (sizeof(long) < 8) {
2374                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2375                         sfs->f_bsize <<= 1;
2376
2377                         osfs.os_blocks >>= 1;
2378                         osfs.os_bfree >>= 1;
2379                         osfs.os_bavail >>= 1;
2380                 }
2381         }
2382
2383         sfs->f_blocks = osfs.os_blocks;
2384         sfs->f_bfree = osfs.os_bfree;
2385         sfs->f_bavail = osfs.os_bavail;
2386         sfs->f_fsid.val[0] = (__u32)fsid;
2387         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2388         if (ll_i2info(de->d_inode)->lli_projid)
2389                 return ll_statfs_project(de->d_inode, sfs);
2390
2391         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2392                            ktime_us_delta(ktime_get(), kstart));
2393
2394         return 0;
2395 }
2396
2397 void ll_inode_size_lock(struct inode *inode)
2398 {
2399         struct ll_inode_info *lli;
2400
2401         LASSERT(!S_ISDIR(inode->i_mode));
2402
2403         lli = ll_i2info(inode);
2404         mutex_lock(&lli->lli_size_mutex);
2405 }
2406
2407 void ll_inode_size_unlock(struct inode *inode)
2408 {
2409         struct ll_inode_info *lli;
2410
2411         lli = ll_i2info(inode);
2412         mutex_unlock(&lli->lli_size_mutex);
2413 }
2414
2415 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
2416 {
2417         /* do not clear encryption flag */
2418         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2419         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2420         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2421                 set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2422         else
2423                 clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2424 }
2425
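     /* Refresh the in-core inode from a lustre_md reply: set up the
      * cl_object or directory layout when a layout EA is present, then
      * apply every attribute the mdt_body marks valid.  Timestamps are
      * only moved forward so stale replies cannot clobber newer times.
      */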
2426 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2427 {
2428         struct ll_inode_info *lli = ll_i2info(inode);
2429         struct mdt_body *body = md->body;
2430         struct ll_sb_info *sbi = ll_i2sbi(inode);
2431         int rc = 0;
2432
2433         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2434                 rc = cl_file_inode_init(inode, md);
2435                 if (rc)
2436                         return rc;
2437         }
2438
2439         if (S_ISDIR(inode->i_mode)) {
2440                 rc = ll_update_lsm_md(inode, md);
2441                 if (rc != 0)
2442                         return rc;
2443         }
2444
2445         if (body->mbo_valid & OBD_MD_FLACL)
2446                 lli_replace_acl(lli, md);
2447
2448         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
2449                                         sbi->ll_flags & LL_SBI_32BIT_API);
2450         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2451
2452         if (body->mbo_valid & OBD_MD_FLATIME) {
2453                 if (body->mbo_atime > inode->i_atime.tv_sec)
2454                         inode->i_atime.tv_sec = body->mbo_atime;
2455                 lli->lli_atime = body->mbo_atime;
2456         }
2457
2458         if (body->mbo_valid & OBD_MD_FLMTIME) {
2459                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2460                         CDEBUG(D_INODE,
2461                                "setting ino %lu mtime from %lld to %llu\n",
2462                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2463                                body->mbo_mtime);
2464                         inode->i_mtime.tv_sec = body->mbo_mtime;
2465                 }
2466                 lli->lli_mtime = body->mbo_mtime;
2467         }
2468
2469         if (body->mbo_valid & OBD_MD_FLCTIME) {
2470                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2471                         inode->i_ctime.tv_sec = body->mbo_ctime;
2472                 lli->lli_ctime = body->mbo_ctime;
2473         }
2474
2475         if (body->mbo_valid & OBD_MD_FLBTIME)
2476                 lli->lli_btime = body->mbo_btime;
2477
2478         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2479         if (body->mbo_valid & OBD_MD_FLFLAGS)
2480                 ll_update_inode_flags(inode, body->mbo_flags);
2481         if (body->mbo_valid & OBD_MD_FLMODE)
2482                 inode->i_mode = (inode->i_mode & S_IFMT) |
2483                                 (body->mbo_mode & ~S_IFMT);
2484
2485         if (body->mbo_valid & OBD_MD_FLTYPE)
2486                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2487                                 (body->mbo_mode & S_IFMT);
2488
2489         LASSERT(inode->i_mode != 0);
2490         if (body->mbo_valid & OBD_MD_FLUID)
2491                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2492         if (body->mbo_valid & OBD_MD_FLGID)
2493                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2494         if (body->mbo_valid & OBD_MD_FLPROJID)
2495                 lli->lli_projid = body->mbo_projid;
2496         if (body->mbo_valid & OBD_MD_FLNLINK)
2497                 set_nlink(inode, body->mbo_nlink);
2498         if (body->mbo_valid & OBD_MD_FLRDEV)
2499                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2500
2501         if (body->mbo_valid & OBD_MD_FLID) {
2502                 /* FID shouldn't be changed! */
2503                 if (fid_is_sane(&lli->lli_fid)) {
2504                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2505                                  "Trying to change FID "DFID
2506                                  " to the "DFID", inode "DFID"(%p)\n",
2507                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2508                                  PFID(ll_inode2fid(inode)), inode);
2509                 } else {
2510                         lli->lli_fid = body->mbo_fid1;
2511                 }
2512         }
2513
2514         LASSERT(fid_seq(&lli->lli_fid) != 0);
2515
2516         lli->lli_attr_valid = body->mbo_valid;
2517         if (body->mbo_valid & OBD_MD_FLSIZE) {
2518                 i_size_write(inode, body->mbo_size);
2519
2520                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2521                        PFID(ll_inode2fid(inode)),
2522                        (unsigned long long)body->mbo_size);
2523
2524                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2525                         inode->i_blocks = body->mbo_blocks;
2526         } else {
2527                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2528                         lli->lli_lazysize = body->mbo_size;
2529                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2530                         lli->lli_lazyblocks = body->mbo_blocks;
2531         }
2532
2533         if (body->mbo_valid & OBD_MD_TSTATE) {
2534                 /* Set LLIF_FILE_RESTORING if a restore is ongoing and
2535                  * clear it when done, to ensure we start glimpsing
2536                  * updated attrs again.
2537                  */
2538                 if (body->mbo_t_state & MS_RESTORE)
2539                         set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2540                 else
2541                         clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2542         }
2543
2544         return 0;
2545 }
2546
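     /* Initialize a newly allocated inode from the lustre_md passed in
      * @opaque: zero the timestamps, apply the MDS attributes via
      * ll_update_inode(), and install the file/dir/symlink/special inode
      * operations matching the resulting i_mode.
      */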
2547 int ll_read_inode2(struct inode *inode, void *opaque)
2548 {
2549         struct lustre_md *md = opaque;
2550         struct ll_inode_info *lli = ll_i2info(inode);
2551         int     rc;
2552         ENTRY;
2553
2554         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2555                PFID(&lli->lli_fid), inode);
2556
2557         /* Core attributes from the MDS first.  This is a new inode, and
2558          * the VFS doesn't zero times in the core inode so we have to do
2559          * it ourselves.  They will be overwritten by either MDS or OST
2560          * attributes - we just need to make sure they aren't newer.
2561          */
2562         inode->i_mtime.tv_sec = 0;
2563         inode->i_atime.tv_sec = 0;
2564         inode->i_ctime.tv_sec = 0;
2565         inode->i_rdev = 0;
2566         rc = ll_update_inode(inode, md);
2567         if (rc != 0)
2568                 RETURN(rc);
2569
2570         /* OIDEBUG(inode); */
2571
2572 #ifdef HAVE_BACKING_DEV_INFO
2573         /* initializing backing dev info. */
2574         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2575 #endif
2576         if (S_ISREG(inode->i_mode)) {
2577                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2578                 inode->i_op = &ll_file_inode_operations;
2579                 inode->i_fop = sbi->ll_fop;
2580                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2581                 EXIT;
2582         } else if (S_ISDIR(inode->i_mode)) {
2583                 inode->i_op = &ll_dir_inode_operations;
2584                 inode->i_fop = &ll_dir_operations;
2585                 EXIT;
2586         } else if (S_ISLNK(inode->i_mode)) {
2587                 inode->i_op = &ll_fast_symlink_inode_operations;
2588                 EXIT;
2589         } else {
2590                 inode->i_op = &ll_special_inode_operations;
2591
2592                 init_special_inode(inode, inode->i_mode,
2593                                    inode->i_rdev);
2594
2595                 EXIT;
2596         }
2597
2598         return 0;
2599 }
2600
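     /* Last-chance flush (or discard, if i_nlink is already 0) of dirty
      * data for the inode, followed by truncation of the page cache and a
      * check that no pages remain (modulo the LU-118 race) before the
      * inode is cleared.
      */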
2601 void ll_delete_inode(struct inode *inode)
2602 {
2603         struct ll_inode_info *lli = ll_i2info(inode);
2604         struct address_space *mapping = &inode->i_data;
2605         unsigned long nrpages;
2606         unsigned long flags;
2607
2608         ENTRY;
2609
2610         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2611                 /* This is the last chance to write out dirty pages;
2612                  * otherwise we may lose data during umount.
2613                  *
2614                  * If i_nlink is 0 then just discard the data. This is safe
2615                  * because the local inode gets i_nlink 0 from the server only
2616                  * for the last unlink, so the file is not open anywhere else.
2617                  */
2618                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2619                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2620         }
2621         truncate_inode_pages_final(mapping);
2622
2623         /* Workaround for LU-118: Note nrpages may not be totally updated when
2624          * truncate_inode_pages() returns, as there can be a page in the process
2625          * of deletion (inside __delete_from_page_cache()) in the specified
2626          * range. Thus mapping->nrpages can be non-zero when this function
2627          * returns even after truncation of the whole mapping.  Only do this
2628          * if nrpages isn't already zero.
2629          */
2630         nrpages = mapping->nrpages;
2631         if (nrpages) {
2632                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2633                 nrpages = mapping->nrpages;
2634                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2635         } /* Workaround end */
2636
2637         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2638                  "see https://jira.whamcloud.com/browse/LU-118\n",
2639                  ll_i2sbi(inode)->ll_fsname,
2640                  PFID(ll_inode2fid(inode)), inode, nrpages);
2641
2642         ll_clear_inode(inode);
2643         clear_inode(inode);
2644
2645         EXIT;
2646 }
2647
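     /* Handle FS_IOC_GETFLAGS/FS_IOC_SETFLAGS: GETFLAGS fetches the flags
      * from the MDS with md_getattr(); SETFLAGS validates the project
      * inherit bit, pushes the new flags to the MDS and, if the file has a
      * data object, to the OSTs via cl_setattr_ost().
      */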
2648 int ll_iocontrol(struct inode *inode, struct file *file,
2649                  unsigned int cmd, unsigned long arg)
2650 {
2651         struct ll_sb_info *sbi = ll_i2sbi(inode);
2652         struct ptlrpc_request *req = NULL;
2653         int rc, flags = 0;
2654         ENTRY;
2655
2656         switch (cmd) {
2657         case FS_IOC_GETFLAGS: {
2658                 struct mdt_body *body;
2659                 struct md_op_data *op_data;
2660
2661                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2662                                              0, 0, LUSTRE_OPC_ANY,
2663                                              NULL);
2664                 if (IS_ERR(op_data))
2665                         RETURN(PTR_ERR(op_data));
2666
2667                 op_data->op_valid = OBD_MD_FLFLAGS;
2668                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2669                 ll_finish_md_op_data(op_data);
2670                 if (rc) {
2671                         CERROR("%s: failure inode "DFID": rc = %d\n",
2672                                sbi->ll_md_exp->exp_obd->obd_name,
2673                                PFID(ll_inode2fid(inode)), rc);
2674                         RETURN(-abs(rc));
2675                 }
2676
2677                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2678
2679                 flags = body->mbo_flags;
2680
2681                 ptlrpc_req_finished(req);
2682
2683                 RETURN(put_user(flags, (int __user *)arg));
2684         }
2685         case FS_IOC_SETFLAGS: {
2686                 struct iattr *attr;
2687                 struct md_op_data *op_data;
2688                 struct cl_object *obj;
2689                 struct fsxattr fa = { 0 };
2690
2691                 if (get_user(flags, (int __user *)arg))
2692                         RETURN(-EFAULT);
2693
2694                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2695                 if (flags & LUSTRE_PROJINHERIT_FL)
2696                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2697
2698                 rc = ll_ioctl_check_project(inode, &fa);
2699                 if (rc)
2700                         RETURN(rc);
2701
2702                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2703                                              LUSTRE_OPC_ANY, NULL);
2704                 if (IS_ERR(op_data))
2705                         RETURN(PTR_ERR(op_data));
2706
2707                 op_data->op_attr_flags = flags;
2708                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2709                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2710                 ll_finish_md_op_data(op_data);
2711                 ptlrpc_req_finished(req);
2712                 if (rc)
2713                         RETURN(rc);
2714
2715                 ll_update_inode_flags(inode, flags);
2716
2717                 obj = ll_i2info(inode)->lli_clob;
2718                 if (obj == NULL)
2719                         RETURN(0);
2720
2721                 OBD_ALLOC_PTR(attr);
2722                 if (attr == NULL)
2723                         RETURN(-ENOMEM);
2724
2725                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2726
2727                 OBD_FREE_PTR(attr);
2728                 RETURN(rc);
2729         }
2730         default:
2731                 RETURN(-ENOSYS);
2732         }
2733
2734         RETURN(0);
2735 }
2736
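/*
 * Ask both the MDC and OSC/LOV exports to flush the security context
 * (e.g. GSS credentials, if in use) for the current user.  The requests
 * are sent asynchronously and any errors are ignored.
 */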
2737 int ll_flush_ctx(struct inode *inode)
2738 {
2739         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2740
2741         CDEBUG(D_SEC, "flush context for user %d\n",
2742                from_kuid(&init_user_ns, current_uid()));
2743
2744         obd_set_info_async(NULL, sbi->ll_md_exp,
2745                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2746                            0, NULL, NULL);
2747         obd_set_info_async(NULL, sbi->ll_dt_exp,
2748                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2749                            0, NULL, NULL);
2750         return 0;
2751 }
2752
2753 /* umount -f client means force down, don't save state */
2754 void ll_umount_begin(struct super_block *sb)
2755 {
2756         struct ll_sb_info *sbi = ll_s2sbi(sb);
2757         struct obd_device *obd;
2758         struct obd_ioctl_data *ioc_data;
2759         int cnt;
2760         ENTRY;
2761
2762         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2763                sb->s_count, atomic_read(&sb->s_active));
2764
2765         obd = class_exp2obd(sbi->ll_md_exp);
2766         if (obd == NULL) {
2767                 CERROR("Invalid MDC connection handle %#llx\n",
2768                        sbi->ll_md_exp->exp_handle.h_cookie);
2769                 EXIT;
2770                 return;
2771         }
2772         obd->obd_force = 1;
2773
2774         obd = class_exp2obd(sbi->ll_dt_exp);
2775         if (obd == NULL) {
2776                 CERROR("Invalid LOV connection handle %#llx\n",
2777                        sbi->ll_dt_exp->exp_handle.h_cookie);
2778                 EXIT;
2779                 return;
2780         }
2781         obd->obd_force = 1;
2782
2783         OBD_ALLOC_PTR(ioc_data);
2784         if (ioc_data) {
2785                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2786                               sizeof *ioc_data, ioc_data, NULL);
2787
2788                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2789                               sizeof *ioc_data, ioc_data, NULL);
2790
2791                 OBD_FREE_PTR(ioc_data);
2792         }
2793
2794         /* Ideally we would wait until there are no requests outstanding,
2795          * then continue.  For now, we just periodically check whether the
2796          * VFS has decremented the mount count and hope to finish within 10s.
2797          */
2798         cnt = 10;
2799         while (cnt > 0 &&
2800                !may_umount(sbi->ll_mnt.mnt)) {
2801                 ssleep(1);
2802                 cnt -= 1;
2803         }
2804
2805         EXIT;
2806 }
2807
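/*
 * Handle "mount -o remount,ro" (or ",rw"): push the new read-only state to
 * the MDT via KEY_READ_ONLY and mirror it in sb->s_flags on success.
 */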
2808 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2809 {
2810         struct ll_sb_info *sbi = ll_s2sbi(sb);
2811         char *profilenm = get_profile_name(sb);
2812         int err;
2813         __u32 read_only;
2814
2815         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2816                 read_only = *flags & MS_RDONLY;
2817                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2818                                          sizeof(KEY_READ_ONLY),
2819                                          KEY_READ_ONLY, sizeof(read_only),
2820                                          &read_only, NULL);
2821                 if (err) {
2822                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2823                                       profilenm, read_only ?
2824                                       "read-only" : "read-write", err);
2825                         return err;
2826                 }
2827
2828                 if (read_only)
2829                         sb->s_flags |= SB_RDONLY;
2830                 else
2831                         sb->s_flags &= ~SB_RDONLY;
2832
2833                 if (sbi->ll_flags & LL_SBI_VERBOSE)
2834                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2835                                       read_only ?  "read-only" : "read-write");
2836         }
2837         return 0;
2838 }
2839
2840 /**
2841  * Clean up an open handle that is cached on the MDT side.
2842  *
2843  * The client-side open handling thread may hit an error after the MDT has
2844  * granted the open. In that case the client should send a close RPC to the
2845  * MDT as cleanup; otherwise the open handle is leaked on the MDT until the
2846  * client unmounts or is evicted.
2847  *
2848  * Furthermore, if someone unlinks the file, the open handle still holds a
2849  * reference on the file/object, which blocks subsequent threads that want
2850  * to locate that object via FID.
2851  *
2852  * \param[in] sb        super block for this file-system
2853  * \param[in] open_req  pointer to the original open request
2854  */
2855 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2856 {
2857         struct mdt_body                 *body;
2858         struct md_op_data               *op_data;
2859         struct ptlrpc_request           *close_req = NULL;
2860         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2861         ENTRY;
2862
2863         body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
2864         OBD_ALLOC_PTR(op_data);
2865         if (op_data == NULL) {
2866                 CWARN("%s: cannot allocate op_data to release open handle for "
2867                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2868
2869                 RETURN_EXIT;
2870         }
2871
2872         op_data->op_fid1 = body->mbo_fid1;
2873         op_data->op_open_handle = body->mbo_open_handle;
2874         op_data->op_mod_time = ktime_get_real_seconds();
2875         md_close(exp, op_data, NULL, &close_req);
2876         ptlrpc_req_finished(close_req);
2877         ll_finish_md_op_data(op_data);
2878
2879         EXIT;
2880 }
2881
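/*
 * Build or refresh a client inode from an MDT reply: unpack the lustre_md
 * from @req, update an existing inode or allocate a new one via ll_iget(),
 * apply a piggybacked layout if the intent holds a layout lock, and clean
 * up the cached MDT open handle if an open intent fails part way through.
 */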
2882 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2883                   struct super_block *sb, struct lookup_intent *it)
2884 {
2885         struct ll_sb_info *sbi = NULL;
2886         struct lustre_md md = { NULL };
2887         bool default_lmv_deleted = false;
2888         int rc;
2889
2890         ENTRY;
2891
2892         LASSERT(*inode || sb);
2893         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2894         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2895                               sbi->ll_md_exp, &md);
2896         if (rc != 0)
2897                 GOTO(out, rc);
2898
2899         /*
2900          * Clear default_lmv only if the intent_getattr reply doesn't contain
2901          * it, but do so after iget. Check this early because
2902          * ll_update_lsm_md() may change md.
2903          */
2904         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2905             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2906                 default_lmv_deleted = true;
2907
2908         if (*inode) {
2909                 rc = ll_update_inode(*inode, &md);
2910                 if (rc != 0)
2911                         GOTO(out, rc);
2912         } else {
2913                 LASSERT(sb != NULL);
2914
2915                 /*
2916                  * At this point the server returns the same FID that the
2917                  * client generated for the create, so using ->fid1 is okay.
2918                  */
2919                 if (!fid_is_sane(&md.body->mbo_fid1)) {
2920                         CERROR("%s: Fid is insane "DFID"\n",
2921                                 sbi->ll_fsname,
2922                                 PFID(&md.body->mbo_fid1));
2923                         GOTO(out, rc = -EINVAL);
2924                 }
2925
2926                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2927                                              sbi->ll_flags & LL_SBI_32BIT_API),
2928                                  &md);
2929                 if (IS_ERR(*inode)) {
2930                         lmd_clear_acl(&md);
2931                         rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
2932                         *inode = NULL;
2933                         CERROR("new_inode -fatal: rc %d\n", rc);
2934                         GOTO(out, rc);
2935                 }
2936         }
2937
2938         /* Handle a piggybacked layout lock.
2939          * A layout lock can be piggybacked on getattr and open requests.
2940          * The lsm may be applied to the inode only if it comes with a layout
2941          * lock, otherwise a correct layout may be overwritten, for example:
2942          * 1. proc1: MDT returns an lsm but does not grant a layout lock
2943          * 2. the layout is changed by another client
2944          * 3. proc2: refreshes the layout and is granted the layout lock
2945          * 4. proc1: would then apply a stale layout */
2946         if (it != NULL && it->it_lock_mode != 0) {
2947                 struct lustre_handle lockh;
2948                 struct ldlm_lock *lock;
2949
2950                 lockh.cookie = it->it_lock_handle;
2951                 lock = ldlm_handle2lock(&lockh);
2952                 LASSERT(lock != NULL);
2953                 if (ldlm_has_layout(lock)) {
2954                         struct cl_object_conf conf;
2955
2956                         memset(&conf, 0, sizeof(conf));
2957                         conf.coc_opc = OBJECT_CONF_SET;
2958                         conf.coc_inode = *inode;
2959                         conf.coc_lock = lock;
2960                         conf.u.coc_layout = md.layout;
2961                         (void)ll_layout_conf(*inode, &conf);
2962                 }
2963                 LDLM_LOCK_PUT(lock);
2964         }
2965
2966         if (default_lmv_deleted)
2967                 ll_update_default_lsm_md(*inode, &md);
2968
2969         /* we may want to apply some policy for foreign file/dir */
2970         if (ll_sbi_has_foreign_symlink(sbi)) {
2971                 rc = ll_manage_foreign(*inode, &md);
2972                 if (rc < 0)
2973                         GOTO(out, rc);
2974         }
2975
2976         GOTO(out, rc = 0);
2977
2978 out:
2979         /* cleanup will be done if necessary */
2980         md_free_lustre_md(sbi->ll_md_exp, &md);
2981
2982         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
2983                 ll_intent_drop_lock(it);
2984                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req);
2985         }
2986
2987         return rc;
2988 }
2989
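/*
 * Handle the IOC_OBD_STATFS ioctl: validate the user-supplied obd_ioctl_data,
 * select the MDC or OSC export based on LL_STATFS_LMV/LL_STATFS_LOV, and
 * forward the statfs request through obd_iocontrol().
 */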
2990 int ll_obd_statfs(struct inode *inode, void __user *arg)
2991 {
2992         struct ll_sb_info *sbi = NULL;
2993         struct obd_export *exp;
2994         struct obd_ioctl_data *data = NULL;
2995         __u32 type;
2996         int len = 0, rc;
2997
2998         if (inode)
2999                 sbi = ll_i2sbi(inode);
3000         if (!sbi)
3001                 GOTO(out_statfs, rc = -EINVAL);
3002
3003         rc = obd_ioctl_getdata(&data, &len, arg);
3004         if (rc)
3005                 GOTO(out_statfs, rc);
3006
3007         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3008             !data->ioc_pbuf1 || !data->ioc_pbuf2)
3009                 GOTO(out_statfs, rc = -EINVAL);
3010
3011         if (data->ioc_inllen1 != sizeof(__u32) ||
3012             data->ioc_inllen2 != sizeof(__u32) ||
3013             data->ioc_plen1 != sizeof(struct obd_statfs) ||
3014             data->ioc_plen2 != sizeof(struct obd_uuid))
3015                 GOTO(out_statfs, rc = -EINVAL);
3016
3017         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3018         if (type & LL_STATFS_LMV)
3019                 exp = sbi->ll_md_exp;
3020         else if (type & LL_STATFS_LOV)
3021                 exp = sbi->ll_dt_exp;
3022         else
3023                 GOTO(out_statfs, rc = -ENODEV);
3024
3025         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3026         if (rc)
3027                 GOTO(out_statfs, rc);
3028 out_statfs:
3029         OBD_FREE_LARGE(data, len);
3030         return rc;
3031 }
3032
3033 /*
3034  * This is normally called from ll_finish_md_op_data(), but sometimes it
3035  * needs to be called early to avoid deadlock.
3036  */
3037 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3038 {
3039         if (op_data->op_mea2_sem) {
3040                 up_read_non_owner(op_data->op_mea2_sem);
3041                 op_data->op_mea2_sem = NULL;
3042         }
3043
3044         if (op_data->op_mea1_sem) {
3045                 up_read_non_owner(op_data->op_mea1_sem);
3046                 op_data->op_mea1_sem = NULL;
3047         }
3048 }
3049
3050 /* Prepare the md_op_data hint to be passed down to the MD stack. */
3051 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3052                                       struct inode *i1, struct inode *i2,
3053                                       const char *name, size_t namelen,
3054                                       __u32 mode, enum md_op_code opc,
3055                                       void *data)
3056 {
3057         LASSERT(i1 != NULL);
3058
3059         if (name == NULL) {
3060                 /* Do not reuse namelen for something else. */
3061                 if (namelen != 0)
3062                         return ERR_PTR(-EINVAL);
3063         } else {
3064                 if (namelen > ll_i2sbi(i1)->ll_namelen)
3065                         return ERR_PTR(-ENAMETOOLONG);
3066
3067                 /* "/" is not a valid name in general, but it is allowed here */
3068                 if (!lu_name_is_valid_2(name, namelen) &&
3069                     strncmp("/", name, namelen) != 0)
3070                         return ERR_PTR(-EINVAL);
3071         }
3072
3073         if (op_data == NULL)
3074                 OBD_ALLOC_PTR(op_data);
3075
3076         if (op_data == NULL)
3077                 return ERR_PTR(-ENOMEM);
3078
3079         ll_i2gids(op_data->op_suppgids, i1, i2);
3080         op_data->op_fid1 = *ll_inode2fid(i1);
3081         op_data->op_code = opc;
3082
3083         if (S_ISDIR(i1->i_mode)) {
3084                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3085                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3086                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3087                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3088         }
3089
3090         if (i2) {
3091                 op_data->op_fid2 = *ll_inode2fid(i2);
3092                 if (S_ISDIR(i2->i_mode)) {
3093                         if (i2 != i1) {
3094                                 /* i2 is typically a child of i1, and MUST be
3095                                  * further from the root to avoid deadlocks.
3096                                  */
3097                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3098                                 op_data->op_mea2_sem =
3099                                                 &ll_i2info(i2)->lli_lsm_sem;
3100                         }
3101                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3102                 }
3103         } else {
3104                 fid_zero(&op_data->op_fid2);
3105         }
3106
3107         if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
3108                 op_data->op_cli_flags |= CLI_HASH64;
3109
3110         if (ll_need_32bit_api(ll_i2sbi(i1)))
3111                 op_data->op_cli_flags |= CLI_API32;
3112
3113         op_data->op_name = name;
3114         op_data->op_namelen = namelen;
3115         op_data->op_mode = mode;
3116         op_data->op_mod_time = ktime_get_real_seconds();
3117         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3118         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3119         op_data->op_cap = cfs_curproc_cap_pack();
3120         op_data->op_mds = 0;
3121         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3122              filename_is_volatile(name, namelen, &op_data->op_mds)) {
3123                 op_data->op_bias |= MDS_CREATE_VOLATILE;
3124         }
3125         op_data->op_data = data;
3126
3127         return op_data;
3128 }
3129
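/*
 * Release everything attached to op_data: drop any lsm semaphores taken in
 * ll_prep_md_op_data(), free the security and encryption contexts, and free
 * the structure itself.
 */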
3130 void ll_finish_md_op_data(struct md_op_data *op_data)
3131 {
3132         ll_unlock_md_op_lsm(op_data);
3133         ll_security_release_secctx(op_data->op_file_secctx,
3134                                    op_data->op_file_secctx_size);
3135         llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3136         OBD_FREE_PTR(op_data);
3137 }
3138
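/*
 * Print the llite-specific mount options reflected by sbi->ll_flags, for
 * display in /proc/mounts.
 */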
3139 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3140 {
3141         struct ll_sb_info *sbi;
3142
3143         LASSERT(seq && dentry);
3144         sbi = ll_s2sbi(dentry->d_sb);
3145
3146         if (sbi->ll_flags & LL_SBI_NOLCK)
3147                 seq_puts(seq, ",nolock");
3148
3149         /* "flock" is the default since 2.13, but it wasn't for many years
3150          * before that, so it is still useful to print it to show it is
3151          * enabled.  Also print "noflock" so it is clear when flock is disabled.
3152          */
3153         if (sbi->ll_flags & LL_SBI_FLOCK)
3154                 seq_puts(seq, ",flock");
3155         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
3156                 seq_puts(seq, ",localflock");
3157         else
3158                 seq_puts(seq, ",noflock");
3159
3160         if (sbi->ll_flags & LL_SBI_USER_XATTR)
3161                 seq_puts(seq, ",user_xattr");
3162
3163         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
3164                 seq_puts(seq, ",lazystatfs");
3165
3166         if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
3167                 seq_puts(seq, ",user_fid2path");
3168
3169         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
3170                 seq_puts(seq, ",always_ping");
3171
3172         if (ll_sbi_has_test_dummy_encryption(sbi))
3173                 seq_puts(seq, ",test_dummy_encryption");
3174
3175         if (ll_sbi_has_encrypt(sbi))
3176                 seq_puts(seq, ",encrypt");
3177         else
3178                 seq_puts(seq, ",noencrypt");
3179
3180         if (sbi->ll_flags & LL_SBI_FOREIGN_SYMLINK) {
3181                 seq_puts(seq, ",foreign_symlink=");
3182                 seq_puts(seq, sbi->ll_foreign_symlink_prefix);
3183         }
3184
3185         RETURN(0);
3186 }
3187
3188 /**
3189  * Get the obd name selected by cmd and copy it out to user space.
3190  */
3191 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3192 {
3193         struct ll_sb_info *sbi = ll_i2sbi(inode);
3194         struct obd_device *obd;
3195         ENTRY;
3196
3197         if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
3198                 obd = class_exp2obd(sbi->ll_dt_exp);
3199         else if (cmd == OBD_IOC_GETMDNAME)
3200                 obd = class_exp2obd(sbi->ll_md_exp);
3201         else
3202                 RETURN(-EINVAL);
3203
3204         if (!obd)
3205                 RETURN(-ENOENT);
3206
3207         if (copy_to_user((void __user *)arg, obd->obd_name,
3208                          strlen(obd->obd_name) + 1))
3209                 RETURN(-EFAULT);
3210
3211         RETURN(0);
3212 }
3213
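/*
 * Resolve @dentry to a path string relative to the current root mount.
 * The returned pointer references @buf, or is an ERR_PTR on failure.
 */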
3214 static char* ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3215 {
3216         char *path = NULL;
3217
3218         struct path p;
3219
3220         p.dentry = dentry;
3221         p.mnt = current->fs->root.mnt;
3222         path_get(&p);
3223         path = d_path(&p, buf, bufsize);
3224         path_put(&p);
3225         return path;
3226 }
3227
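/*
 * Emit a D_WARNING message identifying the file whose dirty page is being
 * discarded; the path is resolved best-effort through a dentry alias.
 */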
3228 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3229 {
3230         char *buf, *path = NULL;
3231         struct dentry *dentry = NULL;
3232         struct inode *inode = page->mapping->host;
3233
3234         /* this can be called from inside a spinlock, so use GFP_ATOMIC. */
3235         buf = (char *)__get_free_page(GFP_ATOMIC);
3236         if (buf != NULL) {
3237                 dentry = d_find_alias(page->mapping->host);
3238                 if (dentry != NULL)
3239                         path = ll_d_path(dentry, buf, PAGE_SIZE);
3240         }
3241
3242         /* The below message is checked in recovery-small.sh test_24b */
3243         CDEBUG(D_WARNING,
3244                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3245                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3246                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3247                PFID(ll_inode2fid(inode)),
3248                (path && !IS_ERR(path)) ? path : "", ioret);
3249
3250         if (dentry != NULL)
3251                 dput(dentry);
3252
3253         if (buf != NULL)
3254                 free_page((unsigned long)buf);
3255 }
3256
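/*
 * Copy a lov_user_md from user space into a freshly allocated kernel buffer
 * sized by ll_lov_user_md_size().  On success the size is returned and the
 * caller must free *kbuf with OBD_FREE_LARGE() using that size.
 */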
3257 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3258                         struct lov_user_md **kbuf)
3259 {
3260         struct lov_user_md      lum;
3261         ssize_t                 lum_size;
3262         ENTRY;
3263
3264         if (copy_from_user(&lum, md, sizeof(lum)))
3265                 RETURN(-EFAULT);
3266
3267         lum_size = ll_lov_user_md_size(&lum);
3268         if (lum_size < 0)
3269                 RETURN(lum_size);
3270
3271         OBD_ALLOC_LARGE(*kbuf, lum_size);
3272         if (*kbuf == NULL)
3273                 RETURN(-ENOMEM);
3274
3275         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3276                 OBD_FREE_LARGE(*kbuf, lum_size);
3277                 RETURN(-EFAULT);
3278         }
3279
3280         RETURN(lum_size);
3281 }
3282
3283 /*
3284  * Compute the llite root squash state after a change of the root squash
3285  * configuration or the addition/removal of an LNet NID.
3286  */
3287 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3288 {
3289         struct root_squash_info *squash = &sbi->ll_squash;
3290         int i;
3291         bool matched;
3292         struct lnet_process_id id;
3293
3294         /* Update norootsquash flag */
3295         spin_lock(&squash->rsi_lock);
3296         if (list_empty(&squash->rsi_nosquash_nids))
3297                 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3298         else {
3299                 /* Do not apply root squash if any of our NIDs is in the
3300                  * nosquash_nids list */
3301                 matched = false;
3302                 i = 0;
3303                 while (LNetGetId(i++, &id) != -ENOENT) {
3304                         if (id.nid == LNET_NID_LO_0)
3305                                 continue;
3306                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3307                                 matched = true;
3308                                 break;
3309                         }
3310                 }
3311                 if (matched)
3312                         sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
3313                 else
3314                         sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3315         }
3316         spin_unlock(&squash->rsi_lock);
3317 }
3318
3319 /**
3320  * Parse linkea content to extract information about a given hardlink
3321  *
3322  * \param[in]   ldata      - Initialized linkea data
3323  * \param[in]   linkno     - Link identifier
3324  * \param[out]  parent_fid - The entry's parent FID
3325  * \param[out]  ln         - Entry name destination buffer
3326  *
3327  * \retval 0 on success
3328  * \retval Appropriate negative error code on failure
3329  */
3330 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3331                             struct lu_fid *parent_fid, struct lu_name *ln)
3332 {
3333         unsigned int    idx;
3334         int             rc;
3335         ENTRY;
3336
3337         rc = linkea_init_with_rec(ldata);
3338         if (rc < 0)
3339                 RETURN(rc);
3340
3341         if (linkno >= ldata->ld_leh->leh_reccount)
3342                 /* beyond last link */
3343                 RETURN(-ENODATA);
3344
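        /* walk the linkEA entries until the requested link index is reached */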
3345         linkea_first_entry(ldata);
3346         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3347                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3348                                     parent_fid);
3349                 if (idx == linkno)
3350                         break;
3351
3352                 linkea_next_entry(ldata);
3353         }
3354
3355         if (idx < linkno)
3356                 RETURN(-ENODATA);
3357
3358         RETURN(0);
3359 }
3360
3361 /**
3362  * Get parent FID and name of an identified link. Operation is performed for
3363  * a given link number, letting the caller iterate over linkno to list one or
3364  * all links of an entry.
3365  *
3366  * \param[in]     file - File descriptor against which to perform the operation
3367  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3368  *                       on and the available size. It is eventually filled with
3369  *                       the requested information or left untouched on error
3370  *
3371  * \retval - 0 on success
3372  * \retval - Appropriate negative error code on failure
3373  */
3374 int ll_getparent(struct file *file, struct getparent __user *arg)
3375 {
3376         struct inode            *inode = file_inode(file);
3377         struct linkea_data      *ldata;
3378         struct lu_buf            buf = LU_BUF_NULL;
3379         struct lu_name           ln;
3380         struct lu_fid            parent_fid;
3381         __u32                    linkno;
3382         __u32                    name_size;
3383         int                      rc;
3384
3385         ENTRY;
3386
3387         if (!capable(CAP_DAC_READ_SEARCH) &&
3388             !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
3389                 RETURN(-EPERM);
3390
3391         if (get_user(name_size, &arg->gp_name_size))
3392                 RETURN(-EFAULT);
3393
3394         if (get_user(linkno, &arg->gp_linkno))
3395                 RETURN(-EFAULT);
3396
3397         if (name_size > PATH_MAX)
3398                 RETURN(-EINVAL);
3399
3400         OBD_ALLOC(ldata, sizeof(*ldata));
3401         if (ldata == NULL)
3402                 RETURN(-ENOMEM);
3403
3404         rc = linkea_data_new(ldata, &buf);
3405         if (rc < 0)
3406                 GOTO(ldata_free, rc);
3407
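        /* fetch the raw link xattr (XATTR_NAME_LINK) for this inode into buf */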
3408         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3409                            buf.lb_len, OBD_MD_FLXATTR);
3410         if (rc < 0)
3411                 GOTO(lb_free, rc);
3412
3413         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3414         if (rc < 0)
3415                 GOTO(lb_free, rc);
3416
3417         if (ln.ln_namelen >= name_size)
3418                 GOTO(lb_free, rc = -EOVERFLOW);
3419
3420         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3421                 GOTO(lb_free, rc = -EFAULT);
3422
3423         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3424                 GOTO(lb_free, rc = -EFAULT);
3425
3426         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3427                 GOTO(lb_free, rc = -EFAULT);
3428
3429 lb_free:
3430         lu_buf_free(&buf);
3431 ldata_free:
3432         OBD_FREE(ldata, sizeof(*ldata));
3433
3434         RETURN(rc);
3435 }