1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/llite/llite_lib.c
32  *
33  * Lustre Light Super operations
34  */
35
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include <linux/cpu.h>
39 #include <linux/module.h>
40 #include <linux/random.h>
41 #include <linux/statfs.h>
42 #include <linux/time.h>
43 #include <linux/types.h>
44 #include <libcfs/linux/linux-uuid.h>
45 #include <linux/version.h>
46 #include <linux/mm.h>
47 #include <linux/user_namespace.h>
48 #include <linux/delay.h>
49 #include <linux/uidgid.h>
50 #include <linux/security.h>
51 #include <linux/fs_struct.h>
52
53 #ifndef HAVE_CPUS_READ_LOCK
54 #include <libcfs/linux/linux-cpu.h>
55 #endif
56 #include <uapi/linux/lustre/lustre_ioctl.h>
57 #ifdef HAVE_UAPI_LINUX_MOUNT_H
58 #include <uapi/linux/mount.h>
59 #endif
60
61 #include <lustre_ha.h>
62 #include <lustre_dlm.h>
63 #include <lprocfs_status.h>
64 #include <lustre_disk.h>
65 #include <uapi/linux/lustre/lustre_param.h>
66 #include <lustre_log.h>
67 #include <cl_object.h>
68 #include <obd_cksum.h>
69 #include "llite_internal.h"
70
71 struct kmem_cache *ll_file_data_slab;
72
73 #ifndef log2
74 #define log2(n) ffz(~(n))
75 #endif
76
77 /**
78  * If only one core is visible to Lustre then async readahead will be
79  * disabled. To avoid massive oversubscription, use 1/2 of the active
80  * cores as the default maximum number of active async readahead
81  * requests.
82  */
83 static inline unsigned int ll_get_ra_async_max_active(void)
84 {
85         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
86 }
87
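/**
 * Allocate and initialize the client-side superblock info for one mount.
 *
 * Sets up the PCC state, the locks, the readahead workqueue and LRU page
 * cache, the foreign symlink defaults ("/mnt/" prefix, "none" upcall),
 * the readahead and statahead limits, the default ll_flags, root squash
 * and per-filesystem file heat parameters.
 *
 * \retval pointer to the new ll_sb_info on success
 * \retval ERR_PTR(-errno) on failure
 */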
88 static struct ll_sb_info *ll_init_sbi(void)
89 {
90         struct ll_sb_info *sbi = NULL;
91         unsigned long pages;
92         unsigned long lru_page_max;
93         struct sysinfo si;
94         int rc;
95         int i;
96
97         ENTRY;
98
99         OBD_ALLOC_PTR(sbi);
100         if (sbi == NULL)
101                 RETURN(ERR_PTR(-ENOMEM));
102
103         rc = pcc_super_init(&sbi->ll_pcc_super);
104         if (rc < 0)
105                 GOTO(out_sbi, rc);
106
107         spin_lock_init(&sbi->ll_lock);
108         mutex_init(&sbi->ll_lco.lco_lock);
109         spin_lock_init(&sbi->ll_pp_extent_lock);
110         spin_lock_init(&sbi->ll_process_lock);
111         sbi->ll_rw_stats_on = 0;
112         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
113
114         si_meminfo(&si);
115         pages = si.totalram - si.totalhigh;
116         lru_page_max = pages / 2;
117
118         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
119         sbi->ll_ra_info.ll_readahead_wq =
120                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
121                                        0, CFS_CPT_ANY,
122                                        sbi->ll_ra_info.ra_async_max_active);
123         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
124                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
125
126         /* initialize ll_cache data */
127         sbi->ll_cache = cl_cache_init(lru_page_max);
128         if (sbi->ll_cache == NULL)
129                 GOTO(out_destroy_ra, rc = -ENOMEM);
130
131         /* initialize foreign symlink prefix path */
132         OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
133         if (sbi->ll_foreign_symlink_prefix == NULL)
134                 GOTO(out_destroy_ra, rc = -ENOMEM);
135         memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
136         sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
137
138         /* initialize foreign symlink upcall path, none by default */
139         OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
140         if (sbi->ll_foreign_symlink_upcall == NULL)
141                 GOTO(out_destroy_ra, rc = -ENOMEM);
142         memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
143         sbi->ll_foreign_symlink_upcall_items = NULL;
144         sbi->ll_foreign_symlink_upcall_nb_items = 0;
145         init_rwsem(&sbi->ll_foreign_symlink_sem);
146         /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
147          * not enabled by default
148          */
149
150         sbi->ll_ra_info.ra_max_pages =
151                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
152         sbi->ll_ra_info.ra_max_pages_per_file =
153                 min(sbi->ll_ra_info.ra_max_pages / 4,
154                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
155         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
156                                 sbi->ll_ra_info.ra_max_pages_per_file;
157         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
158         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
159         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
160
161         sbi->ll_flags |= LL_SBI_VERBOSE;
162 #ifdef ENABLE_CHECKSUM
163         sbi->ll_flags |= LL_SBI_CHECKSUM;
164 #endif
165 #ifdef ENABLE_FLOCK
166         sbi->ll_flags |= LL_SBI_FLOCK;
167 #endif
168
169 #ifdef HAVE_LRU_RESIZE_SUPPORT
170         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
171 #endif
172         sbi->ll_flags |= LL_SBI_LAZYSTATFS;
173
174         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
175                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
176                                pp_r_hist.oh_lock);
177                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
178                                pp_w_hist.oh_lock);
179         }
180
181         /* metadata statahead is enabled by default */
182         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
183         sbi->ll_sa_max = LL_SA_RPC_DEF;
184         atomic_set(&sbi->ll_sa_total, 0);
185         atomic_set(&sbi->ll_sa_wrong, 0);
186         atomic_set(&sbi->ll_sa_running, 0);
187         atomic_set(&sbi->ll_agl_total, 0);
188         sbi->ll_flags |= LL_SBI_AGL_ENABLED;
189         sbi->ll_flags |= LL_SBI_FAST_READ;
190         sbi->ll_flags |= LL_SBI_TINY_WRITE;
191         ll_sbi_set_encrypt(sbi, true);
192
193         /* root squash */
194         sbi->ll_squash.rsi_uid = 0;
195         sbi->ll_squash.rsi_gid = 0;
196         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
197         spin_lock_init(&sbi->ll_squash.rsi_lock);
198
199         /* Per-filesystem file heat */
200         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
201         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
202         RETURN(sbi);
203 out_destroy_ra:
204         if (sbi->ll_foreign_symlink_prefix)
205                 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
206         if (sbi->ll_cache) {
207                 cl_cache_decref(sbi->ll_cache);
208                 sbi->ll_cache = NULL;
209         }
210         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
211 out_pcc:
212         pcc_super_fini(&sbi->ll_pcc_super);
213 out_sbi:
214         OBD_FREE_PTR(sbi);
215         RETURN(ERR_PTR(rc));
216 }
217
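/**
 * Release everything allocated by ll_init_sbi(): the nosquash NID list,
 * the readahead workqueue, the LRU page cache reference, the foreign
 * symlink prefix/upcall buffers and parsed upcall items, the PCC state,
 * and finally the ll_sb_info itself.
 */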
218 static void ll_free_sbi(struct super_block *sb)
219 {
220         struct ll_sb_info *sbi = ll_s2sbi(sb);
221         ENTRY;
222
223         if (sbi != NULL) {
224                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
225                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
226                 if (sbi->ll_ra_info.ll_readahead_wq)
227                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
228                 if (sbi->ll_cache != NULL) {
229                         cl_cache_decref(sbi->ll_cache);
230                         sbi->ll_cache = NULL;
231                 }
232                 if (sbi->ll_foreign_symlink_prefix) {
233                         OBD_FREE(sbi->ll_foreign_symlink_prefix,
234                                  sbi->ll_foreign_symlink_prefix_size);
235                         sbi->ll_foreign_symlink_prefix = NULL;
236                 }
237                 if (sbi->ll_foreign_symlink_upcall) {
238                         OBD_FREE(sbi->ll_foreign_symlink_upcall,
239                                  strlen(sbi->ll_foreign_symlink_upcall) +
240                                        1);
241                         sbi->ll_foreign_symlink_upcall = NULL;
242                 }
243                 if (sbi->ll_foreign_symlink_upcall_items) {
244                         int i;
245                         int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
246                         struct ll_foreign_symlink_upcall_item *items =
247                                 sbi->ll_foreign_symlink_upcall_items;
248
249                         for (i = 0 ; i < nb_items; i++)
250                                 if (items[i].type == STRING_TYPE)
251                                         OBD_FREE(items[i].string,
252                                                        items[i].size);
253
254                         OBD_FREE_LARGE(items, nb_items *
255                                 sizeof(struct ll_foreign_symlink_upcall_item));
256                         sbi->ll_foreign_symlink_upcall_items = NULL;
257                 }
258                 pcc_super_fini(&sbi->ll_pcc_super);
259                 OBD_FREE(sbi, sizeof(*sbi));
260         }
261         EXIT;
262 }
263
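/**
 * Second half of the client mount: connect to the metadata (\a md) and
 * data (\a dt) devices created from the config log, negotiate the connect
 * flags, validate the server feature set, fetch the root FID and build
 * the root inode, and finish setting up \a sb (block size, super
 * operations, xattr handlers, crypto ops, sysfs links).
 *
 * \retval 0 on success, negative errno on failure
 */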
264 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
265 {
266         struct inode *root = NULL;
267         struct ll_sb_info *sbi = ll_s2sbi(sb);
268         struct obd_statfs *osfs = NULL;
269         struct ptlrpc_request *request = NULL;
270         struct obd_connect_data *data = NULL;
271         struct obd_uuid *uuid;
272         struct md_op_data *op_data;
273         struct lustre_md lmd;
274         u64 valid;
275         int size, err, checksum;
276
277         ENTRY;
278         sbi->ll_md_obd = class_name2obd(md);
279         if (!sbi->ll_md_obd) {
280                 CERROR("MD %s: not setup or attached\n", md);
281                 RETURN(-EINVAL);
282         }
283
284         OBD_ALLOC_PTR(data);
285         if (data == NULL)
286                 RETURN(-ENOMEM);
287
288         OBD_ALLOC_PTR(osfs);
289         if (osfs == NULL) {
290                 OBD_FREE_PTR(data);
291                 RETURN(-ENOMEM);
292         }
293
294         /* pass the client page size via ocd_grant_blkbits; the server should
295          * report back its backend blocksize for grant calculation purposes */
296         data->ocd_grant_blkbits = PAGE_SHIFT;
297
298         /* indicate MDT features supported by this client */
299         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
300                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
301                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
302                                   OBD_CONNECT_SRVLOCK  | OBD_CONNECT_TRUNCLOCK|
303                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
304                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
305                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
306                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
307                                   OBD_CONNECT_64BITHASH |
308                                   OBD_CONNECT_EINPROGRESS |
309                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
310                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
311                                   OBD_CONNECT_MAX_EASIZE |
312                                   OBD_CONNECT_FLOCK_DEAD |
313                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
314                                   OBD_CONNECT_OPEN_BY_FID |
315                                   OBD_CONNECT_DIR_STRIPE |
316                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
317                                   OBD_CONNECT_SUBTREE |
318                                   OBD_CONNECT_MULTIMODRPCS |
319                                   OBD_CONNECT_GRANT_PARAM |
320                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
321
322         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
323                                    OBD_CONNECT2_SUM_STATFS |
324                                    OBD_CONNECT2_OVERSTRIPING |
325                                    OBD_CONNECT2_FLR |
326                                    OBD_CONNECT2_LOCK_CONVERT |
327                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
328                                    OBD_CONNECT2_INC_XID |
329                                    OBD_CONNECT2_LSOM |
330                                    OBD_CONNECT2_ASYNC_DISCARD |
331                                    OBD_CONNECT2_PCC |
332                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
333                                    OBD_CONNECT2_GETATTR_PFID |
334                                    OBD_CONNECT2_DOM_LVB;
335
336 #ifdef HAVE_LRU_RESIZE_SUPPORT
337         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
338                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
339 #endif
340         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
341
342         data->ocd_cksum_types = obd_cksum_types_supported_client();
343
344         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
345                 /* flag the MDC connection as lightweight; only used for test
346                  * purposes, use with care */
347                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
348
349         data->ocd_ibits_known = MDS_INODELOCK_FULL;
350         data->ocd_version = LUSTRE_VERSION_CODE;
351
352         if (sb->s_flags & SB_RDONLY)
353                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
354         if (sbi->ll_flags & LL_SBI_USER_XATTR)
355                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
356
357 #ifdef SB_NOSEC
358         /* Setting this indicates we correctly support S_NOSEC (See kernel
359          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
360          */
361         sb->s_flags |= SB_NOSEC;
362 #endif
363         sbi->ll_fop = ll_select_file_operations(sbi);
364
365         /* always ping even if server suppress_pings */
366         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
367                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
368
369         obd_connect_set_secctx(data);
370         if (ll_sbi_has_encrypt(sbi))
371                 obd_connect_set_enc(data);
372
373 #if defined(CONFIG_SECURITY)
374         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
375 #endif
376
377         data->ocd_brw_size = MD_MAX_BRW_SIZE;
378
379         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
380                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
381         if (err == -EBUSY) {
382                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
383                                    "recovery, of which this client is not a "
384                                    "part. Please wait for recovery to complete,"
385                                    " abort, or time out.\n", md);
386                 GOTO(out, err);
387         } else if (err) {
388                 CERROR("cannot connect to %s: rc = %d\n", md, err);
389                 GOTO(out, err);
390         }
391
392         sbi->ll_md_exp->exp_connect_data = *data;
393
394         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
395                            LUSTRE_SEQ_METADATA);
396         if (err) {
397                 CERROR("%s: Can't init metadata layer FID infrastructure, "
398                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
399                 GOTO(out_md, err);
400         }
401
402         /* For mount, we only need fs info from MDT0, which in DNE also
403          * ensures the client can be mounted as long as MDT0 is
404          * available */
405         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
406                         ktime_get_seconds() - sbi->ll_statfs_max_age,
407                         OBD_STATFS_FOR_MDT0);
408         if (err)
409                 GOTO(out_md_fid, err);
410
411         /* This needs to be after statfs to ensure connect has finished.
412          * Note that "data" does NOT contain the valid connect reply.
413          * If connecting to a 1.8 server there will be no LMV device, so
414          * we can access the MDC export directly and exp_connect_flags will
415          * be non-zero, but if accessing an upgraded 2.1 server it will
416          * have the correct flags filled in.
417          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
418         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
419         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
420             valid != CLIENT_CONNECT_MDT_REQD) {
421                 char *buf;
422
423                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
424                 obd_connect_flags2str(buf, PAGE_SIZE,
425                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
426                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
427                                    "feature(s) needed for correct operation "
428                                    "of this client (%s). Please upgrade "
429                                    "server or downgrade client.\n",
430                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
431                 OBD_FREE(buf, PAGE_SIZE);
432                 GOTO(out_md_fid, err = -EPROTO);
433         }
434
435         size = sizeof(*data);
436         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
437                            KEY_CONN_DATA,  &size, data);
438         if (err) {
439                 CERROR("%s: Get connect data failed: rc = %d\n",
440                        sbi->ll_md_exp->exp_obd->obd_name, err);
441                 GOTO(out_md_fid, err);
442         }
443
444         LASSERT(osfs->os_bsize);
445         sb->s_blocksize = osfs->os_bsize;
446         sb->s_blocksize_bits = log2(osfs->os_bsize);
447         sb->s_magic = LL_SUPER_MAGIC;
448         sb->s_maxbytes = MAX_LFS_FILESIZE;
449         sbi->ll_namelen = osfs->os_namelen;
450         sbi->ll_mnt.mnt = current->fs->root.mnt;
451
452         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
453             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
454                 LCONSOLE_INFO("Disabling user_xattr feature because "
455                               "it is not supported on the server\n");
456                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
457         }
458
459         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
460 #ifdef SB_POSIXACL
461                 sb->s_flags |= SB_POSIXACL;
462 #endif
463                 sbi->ll_flags |= LL_SBI_ACL;
464         } else {
465                 LCONSOLE_INFO("client wants to enable ACL, but the MDT does not support it\n");
466 #ifdef SB_POSIXACL
467                 sb->s_flags &= ~SB_POSIXACL;
468 #endif
469                 sbi->ll_flags &= ~LL_SBI_ACL;
470         }
471
472         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
473                 sbi->ll_flags |= LL_SBI_64BIT_HASH;
474
475         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
476                 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
477
478         if (obd_connect_has_secctx(data))
479                 sbi->ll_flags |= LL_SBI_FILE_SECCTX;
480
481         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
482                 if (ll_sbi_has_test_dummy_encryption(sbi))
483                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
484                                       sbi->ll_fsname,
485                                       sbi->ll_md_exp->exp_obd->obd_name);
486                 ll_sbi_set_encrypt(sbi, false);
487         }
488
489         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
490                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
491                         LCONSOLE_INFO("%s: disabling xattr cache due to "
492                                       "unknown maximum xattr size.\n", dt);
493                 } else if (!sbi->ll_xattr_cache_set) {
494                         /* If xattr_cache was already set (to either 0 or 1)
495                          * while processing the llog, it will not be enabled here. */
496                         sbi->ll_flags |= LL_SBI_XATTR_CACHE;
497                         sbi->ll_xattr_cache_enabled = 1;
498                 }
499         }
500
501         sbi->ll_dt_obd = class_name2obd(dt);
502         if (!sbi->ll_dt_obd) {
503                 CERROR("DT %s: not setup or attached\n", dt);
504                 GOTO(out_md_fid, err = -ENODEV);
505         }
506
507         /* pass the client page size via ocd_grant_blkbits; the server should
508          * report back its backend blocksize for grant calculation purposes */
509         data->ocd_grant_blkbits = PAGE_SHIFT;
510
511         /* indicate OST features supported by this client */
512         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
513                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
514                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
515                                   OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
516                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
517                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
518                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
519                                   OBD_CONNECT_EINPROGRESS |
520                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
521                                   OBD_CONNECT_LAYOUTLOCK |
522                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
523                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
524                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
525         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
526                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK;
527
528         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
529                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
530
531         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
532          * disabled by default, because it can still be enabled on the
533          * fly via /sys. As a consequence, we still need to come to an
534          * agreement on the supported algorithms at connect time
535          */
536         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
537
538         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
539                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
540         else
541                 data->ocd_cksum_types = obd_cksum_types_supported_client();
542
543 #ifdef HAVE_LRU_RESIZE_SUPPORT
544         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
545 #endif
546         /* always ping even if server suppress_pings */
547         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
548                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
549
550         if (ll_sbi_has_encrypt(sbi))
551                 obd_connect_set_enc(data);
552
553         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
554                "ocd_grant: %d\n", data->ocd_connect_flags,
555                data->ocd_version, data->ocd_grant);
556
557         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
558         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
559
560         data->ocd_brw_size = DT_MAX_BRW_SIZE;
561
562         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
563                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
564         if (err == -EBUSY) {
565                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
566                                    "recovery, of which this client is not a "
567                                    "part.  Please wait for recovery to "
568                                    "complete, abort, or time out.\n", dt);
569                 GOTO(out_md, err);
570         } else if (err) {
571                 CERROR("%s: Cannot connect to %s: rc = %d\n",
572                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
573                 GOTO(out_md, err);
574         }
575
576         if (ll_sbi_has_encrypt(sbi) &&
577             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
578                 if (ll_sbi_has_test_dummy_encryption(sbi))
579                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
580                                       sbi->ll_fsname, dt);
581                 ll_sbi_set_encrypt(sbi, false);
582         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
583                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
584         }
585
586         sbi->ll_dt_exp->exp_connect_data = *data;
587
588         /* Don't change value if it was specified in the config log */
589         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
590                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
591                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
592                               (data->ocd_brw_size >> PAGE_SHIFT));
593                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
594                     sbi->ll_ra_info.ra_max_pages_per_file)
595                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
596                                 sbi->ll_ra_info.ra_max_pages_per_file;
597         }
598
599         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
600                            LUSTRE_SEQ_METADATA);
601         if (err) {
602                 CERROR("%s: Can't init data layer FID infrastructure, "
603                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
604                 GOTO(out_dt, err);
605         }
606
607         mutex_lock(&sbi->ll_lco.lco_lock);
608         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
609         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
610         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
611         mutex_unlock(&sbi->ll_lco.lco_lock);
612
613         fid_zero(&sbi->ll_root_fid);
614         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
615                            &sbi->ll_root_fid);
616         if (err) {
617                 CERROR("cannot mds_connect: rc = %d\n", err);
618                 GOTO(out_lock_cn_cb, err);
619         }
620         if (!fid_is_sane(&sbi->ll_root_fid)) {
621                 CERROR("%s: Invalid root fid "DFID" during mount\n",
622                        sbi->ll_md_exp->exp_obd->obd_name,
623                        PFID(&sbi->ll_root_fid));
624                 GOTO(out_lock_cn_cb, err = -EINVAL);
625         }
626         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
627
628         sb->s_op = &lustre_super_operations;
629         sb->s_xattr = ll_xattr_handlers;
630 #if THREAD_SIZE >= 8192 /*b=17630*/
631         sb->s_export_op = &lustre_export_operations;
632 #endif
633 #ifdef HAVE_LUSTRE_CRYPTO
634         llcrypt_set_ops(sb, &lustre_cryptops);
635 #endif
636
637         /* make root inode
638          * XXX: move this to after cbd setup? */
639         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
640         if (sbi->ll_flags & LL_SBI_ACL)
641                 valid |= OBD_MD_FLACL;
642
643         OBD_ALLOC_PTR(op_data);
644         if (op_data == NULL)
645                 GOTO(out_lock_cn_cb, err = -ENOMEM);
646
647         op_data->op_fid1 = sbi->ll_root_fid;
648         op_data->op_mode = 0;
649         op_data->op_valid = valid;
650
651         err = md_getattr(sbi->ll_md_exp, op_data, &request);
652
653         OBD_FREE_PTR(op_data);
654         if (err) {
655                 CERROR("%s: md_getattr failed for root: rc = %d\n",
656                        sbi->ll_md_exp->exp_obd->obd_name, err);
657                 GOTO(out_lock_cn_cb, err);
658         }
659
660         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
661                                sbi->ll_md_exp, &lmd);
662         if (err) {
663                 CERROR("failed to understand root inode md: rc = %d\n", err);
664                 ptlrpc_req_finished(request);
665                 GOTO(out_lock_cn_cb, err);
666         }
667
668         LASSERT(fid_is_sane(&sbi->ll_root_fid));
669         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
670                                             sbi->ll_flags & LL_SBI_32BIT_API),
671                        &lmd);
672         md_free_lustre_md(sbi->ll_md_exp, &lmd);
673         ptlrpc_req_finished(request);
674
675         if (IS_ERR(root)) {
676                 lmd_clear_acl(&lmd);
677                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
678                 root = NULL;
679                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
680                        sbi->ll_fsname, err);
681                 GOTO(out_root, err);
682         }
683
684         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
685         if (sbi->ll_checksum_set) {
686                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
687                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
688                                          sizeof(checksum), &checksum, NULL);
689                 if (err) {
690                         CERROR("%s: Set checksum failed: rc = %d\n",
691                                sbi->ll_dt_exp->exp_obd->obd_name, err);
692                         GOTO(out_root, err);
693                 }
694         }
695         cl_sb_init(sb);
696
697         sb->s_root = d_make_root(root);
698         if (sb->s_root == NULL) {
699                 err = -ENOMEM;
700                 CERROR("%s: can't make root dentry: rc = %d\n",
701                        sbi->ll_fsname, err);
702                 GOTO(out_root, err);
703         }
704
705         sbi->ll_sdev_orig = sb->s_dev;
706
707         /* We set sb->s_dev equal on all lustre clients in order to support
708          * NFS export clustering.  NFSD requires that the FSID be the same
709          * on all clients. */
710         /* s_dev is also used in lt_compare() to compare two fs, but that is
711          * only a node-local comparison. */
712         uuid = obd_get_uuid(sbi->ll_md_exp);
713         if (uuid != NULL)
714                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
715
716         if (data != NULL)
717                 OBD_FREE_PTR(data);
718         if (osfs != NULL)
719                 OBD_FREE_PTR(osfs);
720
721         if (sbi->ll_dt_obd) {
722                 err = sysfs_create_link(&sbi->ll_kset.kobj,
723                                         &sbi->ll_dt_obd->obd_kset.kobj,
724                                         sbi->ll_dt_obd->obd_type->typ_name);
725                 if (err < 0) {
726                         CERROR("%s: could not register %s in llite: rc = %d\n",
727                                dt, sbi->ll_fsname, err);
728                         err = 0;
729                 }
730         }
731
732         if (sbi->ll_md_obd) {
733                 err = sysfs_create_link(&sbi->ll_kset.kobj,
734                                         &sbi->ll_md_obd->obd_kset.kobj,
735                                         sbi->ll_md_obd->obd_type->typ_name);
736                 if (err < 0) {
737                         CERROR("%s: could not register %s in llite: rc = %d\n",
738                                md, sbi->ll_fsname, err);
739                         err = 0;
740                 }
741         }
742
743         RETURN(err);
744 out_root:
745         iput(root);
746 out_lock_cn_cb:
747         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
748 out_dt:
749         obd_disconnect(sbi->ll_dt_exp);
750         sbi->ll_dt_exp = NULL;
751         sbi->ll_dt_obd = NULL;
752 out_md_fid:
753         obd_fid_fini(sbi->ll_md_exp->exp_obd);
754 out_md:
755         obd_disconnect(sbi->ll_md_exp);
756         sbi->ll_md_exp = NULL;
757         sbi->ll_md_obd = NULL;
758 out:
759         if (data != NULL)
760                 OBD_FREE_PTR(data);
761         if (osfs != NULL)
762                 OBD_FREE_PTR(osfs);
763         return err;
764 }
765
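/**
 * Get the maximum EA sizes reported by the servers.
 *
 * Queries KEY_MAX_EASIZE on both the data (LOV) and metadata (LMV/MDC)
 * exports; \a lmmsize ends up holding the maximum LMV EA size.
 *
 * \param[in] sbi       superblock info for this filesystem
 * \param[out] lmmsize  pointer to storage location for the value
 *
 * \retval 0            on success
 * \retval negative     negated errno on failure
 */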
766 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
767 {
768         int size, rc;
769
770         size = sizeof(*lmmsize);
771         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
772                           KEY_MAX_EASIZE, &size, lmmsize);
773         if (rc != 0) {
774                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
775                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
776                 RETURN(rc);
777         }
778
779         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
780
781         size = sizeof(int);
782         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
783                           KEY_MAX_EASIZE, &size, lmmsize);
784         if (rc)
785                 CERROR("Get max mdsize error rc %d\n", rc);
786
787         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
788
789         RETURN(rc);
790 }
791
792 /**
793  * Get the value of the default_easize parameter.
794  *
795  * \see client_obd::cl_default_mds_easize
796  *
797  * \param[in] sbi       superblock info for this filesystem
798  * \param[out] lmmsize  pointer to storage location for value
799  *
800  * \retval 0            on success
801  * \retval negative     negated errno on failure
802  */
803 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
804 {
805         int size, rc;
806
807         size = sizeof(int);
808         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
809                          KEY_DEFAULT_EASIZE, &size, lmmsize);
810         if (rc)
811                 CERROR("Get default mdsize error rc %d\n", rc);
812
813         RETURN(rc);
814 }
815
816 /**
817  * Set the default_easize parameter to the given value.
818  *
819  * \see client_obd::cl_default_mds_easize
820  *
821  * \param[in] sbi       superblock info for this filesystem
822  * \param[in] lmmsize   the size to set
823  *
824  * \retval 0            on success
825  * \retval negative     negated errno on failure
826  */
827 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
828 {
829         int rc;
830
831         if (lmmsize < sizeof(struct lov_mds_md) ||
832             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
833                 return -EINVAL;
834
835         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
836                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
837                                 sizeof(int), &lmmsize, NULL);
838
839         RETURN(rc);
840 }
841
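/**
 * Tear down the state created by client_common_fill_super(): clean up the
 * cl_object layer, shut down the FID clients, disconnect from the data
 * and metadata exports, and remove the debugfs entries for this super.
 */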
842 static void client_common_put_super(struct super_block *sb)
843 {
844         struct ll_sb_info *sbi = ll_s2sbi(sb);
845         ENTRY;
846
847         cl_sb_fini(sb);
848
849         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
850         obd_disconnect(sbi->ll_dt_exp);
851         sbi->ll_dt_exp = NULL;
852
853         ll_debugfs_unregister_super(sb);
854
855         obd_fid_fini(sbi->ll_md_exp->exp_obd);
856         obd_disconnect(sbi->ll_md_exp);
857         sbi->ll_md_exp = NULL;
858
859         EXIT;
860 }
861
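/**
 * If the superblock was fully set up (SB_ACTIVE), restore the original
 * s_dev (modified for clustered NFS) and wait for any running statahead
 * threads to finish before the real put_super runs.
 */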
862 void ll_kill_super(struct super_block *sb)
863 {
864         struct ll_sb_info *sbi;
865         ENTRY;
866
867         /* sb not initialized? */
868         if (!(sb->s_flags & SB_ACTIVE))
869                 return;
870
871         sbi = ll_s2sbi(sb);
872         /* we need to restore the original s_dev (changed for clustered NFS)
873          * before put_super, because newer kernels cache s_dev and changing
874          * sb->s_dev in put_super does not affect the real device removal */
875         if (sbi) {
876                 sb->s_dev = sbi->ll_sdev_orig;
877
878                 /* wait for running statahead threads to quit */
879                 while (atomic_read(&sbi->ll_sa_running) > 0)
880                         schedule_timeout_uninterruptible(
881                                 cfs_time_seconds(1) >> 3);
882         }
883
884         EXIT;
885 }
886
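/**
 * Match a single mount option.
 *
 * \retval \a fl if \a data begins with \a opt, e.g.
 *         ll_set_opt("flock", "flock,user_xattr", LL_SBI_FLOCK)
 *         returns LL_SBI_FLOCK
 * \retval 0 otherwise
 */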
887 static inline int ll_set_opt(const char *opt, char *data, int fl)
888 {
889         if (strncmp(opt, data, strlen(opt)) != 0)
890                 return 0;
891         else
892                 return fl;
893 }
894
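/**
 * Parse the llite-specific mount options in \a options and update
 * sbi->ll_flags (plus ll_checksum_set and the foreign symlink prefix)
 * accordingly; an unknown option fails the mount with -EINVAL.
 */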
895 /* non-client-specific mount options are parsed in lmd_parse */
896 static int ll_options(char *options, struct ll_sb_info *sbi)
897 {
898         int tmp;
899         char *s1 = options, *s2;
900         int *flags = &sbi->ll_flags;
901         ENTRY;
902
903         if (!options)
904                 RETURN(0);
905
906         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
907
908         while (*s1) {
909                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
910                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
911                 if (tmp) {
912                         *flags |= tmp;
913                         goto next;
914                 }
915                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
916                 if (tmp) {
917                         *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp;
918                         goto next;
919                 }
920                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
921                 if (tmp) {
922                         *flags = (*flags & ~LL_SBI_FLOCK) | tmp;
923                         goto next;
924                 }
925                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
926                 if (tmp) {
927                         *flags &= ~tmp;
928                         goto next;
929                 }
930                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
931                 if (tmp) {
932                         *flags |= tmp;
933                         goto next;
934                 }
935                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
936                 if (tmp) {
937                         *flags &= ~tmp;
938                         goto next;
939                 }
940                 tmp = ll_set_opt("context", s1, 1);
941                 if (tmp)
942                         goto next;
943                 tmp = ll_set_opt("fscontext", s1, 1);
944                 if (tmp)
945                         goto next;
946                 tmp = ll_set_opt("defcontext", s1, 1);
947                 if (tmp)
948                         goto next;
949                 tmp = ll_set_opt("rootcontext", s1, 1);
950                 if (tmp)
951                         goto next;
952                 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
953                 if (tmp) {
954                         *flags |= tmp;
955                         goto next;
956                 }
957                 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
958                 if (tmp) {
959                         *flags &= ~tmp;
960                         goto next;
961                 }
962
963                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
964                 if (tmp) {
965                         *flags |= tmp;
966                         sbi->ll_checksum_set = 1;
967                         goto next;
968                 }
969                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
970                 if (tmp) {
971                         *flags &= ~tmp;
972                         sbi->ll_checksum_set = 1;
973                         goto next;
974                 }
975                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
976                 if (tmp) {
977                         *flags |= tmp;
978                         goto next;
979                 }
980                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
981                 if (tmp) {
982                         *flags &= ~tmp;
983                         goto next;
984                 }
985                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
986                 if (tmp) {
987                         *flags |= tmp;
988                         goto next;
989                 }
990                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
991                 if (tmp) {
992                         *flags &= ~tmp;
993                         goto next;
994                 }
995                 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
996                 if (tmp) {
997                         *flags |= tmp;
998                         goto next;
999                 }
1000                 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
1001                 if (tmp) {
1002                         *flags |= tmp;
1003                         goto next;
1004                 }
1005                 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
1006                 if (tmp) {
1007                         *flags &= ~tmp;
1008                         goto next;
1009                 }
1010                 tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
1011                 if (tmp) {
1012                         *flags |= tmp;
1013                         goto next;
1014                 }
1015                 tmp = ll_set_opt("test_dummy_encryption", s1,
1016                                  LL_SBI_TEST_DUMMY_ENCRYPTION);
1017                 if (tmp) {
1018 #ifdef HAVE_LUSTRE_CRYPTO
1019                         *flags |= tmp;
1020 #else
1021                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1022 #endif
1023                         goto next;
1024                 }
1025                 tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT);
1026                 if (tmp) {
1027 #ifdef HAVE_LUSTRE_CRYPTO
1028                         *flags &= ~tmp;
1029 #else
1030                         LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n");
1031 #endif
1032                         goto next;
1033                 }
1034                 tmp = ll_set_opt("foreign_symlink", s1, LL_SBI_FOREIGN_SYMLINK);
1035                 if (tmp) {
1036                         int prefix_pos = sizeof("foreign_symlink=") - 1;
1037                         int equal_pos = sizeof("foreign_symlink=") - 2;
1038
1039                         /* non-default prefix provided ? */
1040                         if (strlen(s1) >= sizeof("foreign_symlink=") &&
1041                             *(s1 + equal_pos) == '=') {
1042                                 char *old = sbi->ll_foreign_symlink_prefix;
1043                                 size_t old_len =
1044                                         sbi->ll_foreign_symlink_prefix_size;
1045
1046                                 /* path must be absolute */
1047                                 if (*(s1 + sizeof("foreign_symlink=")
1048                                       - 1) != '/') {
1049                                         LCONSOLE_ERROR_MSG(0x152,
1050                                                 "foreign prefix '%s' must be an absolute path\n",
1051                                                 s1 + prefix_pos);
1052                                         RETURN(-EINVAL);
1053                                 }
1054                                 /* last option ? */
1055                                 s2 = strchrnul(s1 + prefix_pos, ',');
1056
1057                                 if (sbi->ll_foreign_symlink_prefix) {
1058                                         sbi->ll_foreign_symlink_prefix = NULL;
1059                                         sbi->ll_foreign_symlink_prefix_size = 0;
1060                                 }
1061                                 /* alloc for path length and '\0' */
1062                                 OBD_ALLOC(sbi->ll_foreign_symlink_prefix,
1063                                                 s2 - (s1 + prefix_pos) + 1);
1064                                 if (!sbi->ll_foreign_symlink_prefix) {
1065                                         /* restore previous */
1066                                         sbi->ll_foreign_symlink_prefix = old;
1067                                         sbi->ll_foreign_symlink_prefix_size =
1068                                                 old_len;
1069                                         RETURN(-ENOMEM);
1070                                 }
1071                                 if (old)
1072                                         OBD_FREE(old, old_len);
1073                                 strncpy(sbi->ll_foreign_symlink_prefix,
1074                                         s1 + prefix_pos,
1075                                         s2 - (s1 + prefix_pos));
1076                                 sbi->ll_foreign_symlink_prefix_size =
1077                                         s2 - (s1 + prefix_pos) + 1;
1078                         } else {
1079                                 LCONSOLE_ERROR_MSG(0x152,
1080                                                    "invalid %s option\n", s1);
1081                         }
1082                         /* enable foreign symlink support */
1083                         *flags |= tmp;
1084                         goto next;
1085                 }
1086                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
1087                                    s1);
1088                 RETURN(-EINVAL);
1089
1090 next:
1091                 /* Find next opt */
1092                 s2 = strchr(s1, ',');
1093                 if (s2 == NULL)
1094                         break;
1095                 s1 = s2 + 1;
1096         }
1097         RETURN(0);
1098 }
1099
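/**
 * Initialize the Lustre-specific part of a freshly allocated inode:
 * the common locks, open-handle bookkeeping and layout state, plus either
 * the statahead fields for directories or the I/O, glimpse, heat and PCC
 * fields for other inodes.
 */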
1100 void ll_lli_init(struct ll_inode_info *lli)
1101 {
1102         lli->lli_inode_magic = LLI_INODE_MAGIC;
1103         lli->lli_flags = 0;
1104         spin_lock_init(&lli->lli_lock);
1105         lli->lli_posix_acl = NULL;
1106         /* Do not set lli_fid, it has been initialized already. */
1107         fid_zero(&lli->lli_pfid);
1108         lli->lli_mds_read_och = NULL;
1109         lli->lli_mds_write_och = NULL;
1110         lli->lli_mds_exec_och = NULL;
1111         lli->lli_open_fd_read_count = 0;
1112         lli->lli_open_fd_write_count = 0;
1113         lli->lli_open_fd_exec_count = 0;
1114         mutex_init(&lli->lli_och_mutex);
1115         spin_lock_init(&lli->lli_agl_lock);
1116         spin_lock_init(&lli->lli_layout_lock);
1117         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1118         lli->lli_clob = NULL;
1119
1120         init_rwsem(&lli->lli_xattrs_list_rwsem);
1121         mutex_init(&lli->lli_xattrs_enq_lock);
1122
1123         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1124         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1125                 lli->lli_opendir_key = NULL;
1126                 lli->lli_sai = NULL;
1127                 spin_lock_init(&lli->lli_sa_lock);
1128                 lli->lli_opendir_pid = 0;
1129                 lli->lli_sa_enabled = 0;
1130                 init_rwsem(&lli->lli_lsm_sem);
1131         } else {
1132                 mutex_init(&lli->lli_size_mutex);
1133                 mutex_init(&lli->lli_setattr_mutex);
1134                 lli->lli_symlink_name = NULL;
1135                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1136                 range_lock_tree_init(&lli->lli_write_tree);
1137                 init_rwsem(&lli->lli_glimpse_sem);
1138                 lli->lli_glimpse_time = ktime_set(0, 0);
1139                 INIT_LIST_HEAD(&lli->lli_agl_list);
1140                 lli->lli_agl_index = 0;
1141                 lli->lli_async_rc = 0;
1142                 spin_lock_init(&lli->lli_heat_lock);
1143                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1144                 lli->lli_heat_flags = 0;
1145                 mutex_init(&lli->lli_pcc_lock);
1146                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1147                 lli->lli_pcc_inode = NULL;
1148                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1149                 lli->lli_pcc_generation = 0;
1150                 mutex_init(&lli->lli_group_mutex);
1151                 lli->lli_group_users = 0;
1152                 lli->lli_group_gid = 0;
1153         }
1154         mutex_init(&lli->lli_layout_mutex);
1155         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1156 }
1157
1158 #define MAX_STRING_SIZE 128
1159
1160 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1161
1162 #define LSI_BDI_INITIALIZED     0x00400000
1163
1164 #ifndef HAVE_BDI_CAP_MAP_COPY
1165 # define BDI_CAP_MAP_COPY       0
1166 #endif
1167
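/*
 * Compatibility implementation for kernels that do not provide
 * super_setup_bdi_name(): initialize and register a per-mount backing
 * device info structure and attach it to the super block.
 */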
1168 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1169 {
1170         struct  lustre_sb_info *lsi = s2lsi(sb);
1171         char buf[MAX_STRING_SIZE];
1172         va_list args;
1173         int err;
1174
1175         err = bdi_init(&lsi->lsi_bdi);
1176         if (err)
1177                 return err;
1178
1179         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1180         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1181         lsi->lsi_bdi.name = "lustre";
1182         va_start(args, fmt);
1183         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1184         va_end(args);
1185         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1186         va_end(args);
1187         if (!err)
1188                 sb->s_bdi = &lsi->lsi_bdi;
1189
1190         return err;
1191 }
1192 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1193
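/**
 * Main client mount entry point: allocate the ll_sb_info, parse the
 * client mount options, generate the superblock UUID, derive the fsname
 * from the profile name, register the BDI and debugfs entries, process
 * the client config log, and finally connect to the metadata and data
 * devices via client_common_fill_super().
 */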
1194 int ll_fill_super(struct super_block *sb)
1195 {
1196         struct  lustre_profile *lprof = NULL;
1197         struct  lustre_sb_info *lsi = s2lsi(sb);
1198         struct  ll_sb_info *sbi = NULL;
1199         char    *dt = NULL, *md = NULL;
1200         char    *profilenm = get_profile_name(sb);
1201         struct config_llog_instance *cfg;
1202         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1203         const int instlen = LUSTRE_MAXINSTANCE + 2;
1204         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1205         char name[MAX_STRING_SIZE];
1206         int md_len = 0;
1207         int dt_len = 0;
1208         uuid_t uuid;
1209         char *ptr;
1210         int len;
1211         int err;
1212
1213         ENTRY;
1214         /* for ASLR, to map between cfg_instance and hashed ptr */
1215         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1216                profilenm, cfg_instance, sb);
1217
1218         OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
1219
1220         OBD_ALLOC_PTR(cfg);
1221         if (cfg == NULL)
1222                 GOTO(out_free_cfg, err = -ENOMEM);
1223
1224         /* client additional sb info */
1225         lsi->lsi_llsbi = sbi = ll_init_sbi();
1226         if (IS_ERR(sbi))
1227                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1228
1229         err = ll_options(lsi->lsi_lmd->lmd_opts, sbi);
1230         if (err)
1231                 GOTO(out_free_cfg, err);
1232
1233         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1234         sb->s_d_op = &ll_d_ops;
1235
1236         /* UUID handling */
1237         generate_random_uuid(uuid.b);
1238         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1239
1240         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1241
1242         /* Get fsname */
1243         len = strlen(profilenm);
1244         ptr = strrchr(profilenm, '-');
1245         if (ptr && (strcmp(ptr, "-client") == 0))
1246                 len -= 7;
1247
1248         if (len > LUSTRE_MAXFSNAME) {
1249                 if (unlikely(len >= MAX_STRING_SIZE))
1250                         len = MAX_STRING_SIZE - 1;
1251                 strncpy(name, profilenm, len);
1252                 name[len] = '\0';
1253                 err = -ENAMETOOLONG;
1254                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1255                        name, LUSTRE_MAXFSNAME, err);
1256                 GOTO(out_free_cfg, err);
1257         }
1258         strncpy(sbi->ll_fsname, profilenm, len);
1259         sbi->ll_fsname[len] = '\0';
1260
1261         /* Mount info */
1262         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1263                  profilenm, cfg_instance);
1264
1265         err = super_setup_bdi_name(sb, "%s", name);
1266         if (err)
1267                 GOTO(out_free_cfg, err);
1268
1269         /* Call ll_debugfs_register_super() before lustre_process_log()
1270          * so that "llite.*.*" params can be processed correctly.
1271          */
1272         err = ll_debugfs_register_super(sb, name);
1273         if (err < 0) {
1274                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1275                        sbi->ll_fsname, err);
1276                 err = 0;
1277         }
1278
1279         /* The cfg_instance is a value unique to this super, in case some
1280          * joker tries to mount the same fs at two mount points.
1281          */
1282         cfg->cfg_instance = cfg_instance;
1283         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1284         cfg->cfg_callback = class_config_llog_handler;
1285         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1286         /* set up client obds */
1287         err = lustre_process_log(sb, profilenm, cfg);
1288         if (err < 0)
1289                 GOTO(out_debugfs, err);
1290
1291         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1292         lprof = class_get_profile(profilenm);
1293         if (lprof == NULL) {
1294                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1295                                    " read from the MGS.  Does that filesystem "
1296                                    "exist?\n", profilenm);
1297                 GOTO(out_debugfs, err = -EINVAL);
1298         }
1299         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1300                lprof->lp_md, lprof->lp_dt);
1301
1302         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1303         OBD_ALLOC(dt, dt_len);
1304         if (!dt)
1305                 GOTO(out_profile, err = -ENOMEM);
1306         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1307
1308         md_len = strlen(lprof->lp_md) + instlen + 2;
1309         OBD_ALLOC(md, md_len);
1310         if (!md)
1311                 GOTO(out_free_dt, err = -ENOMEM);
1312         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1313
1314         /* connections, registrations, sb setup */
1315         err = client_common_fill_super(sb, md, dt);
1316         if (err < 0)
1317                 GOTO(out_free_md, err);
1318
1319         sbi->ll_client_common_fill_super_succeeded = 1;
1320
1321 out_free_md:
1322         if (md)
1323                 OBD_FREE(md, md_len);
1324 out_free_dt:
1325         if (dt)
1326                 OBD_FREE(dt, dt_len);
1327 out_profile:
1328         if (lprof)
1329                 class_put_profile(lprof);
1330 out_debugfs:
1331         if (err < 0)
1332                 ll_debugfs_unregister_super(sb);
1333 out_free_cfg:
1334         if (cfg)
1335                 OBD_FREE_PTR(cfg);
1336
1337         if (err)
1338                 ll_put_super(sb);
1339         else if (sbi->ll_flags & LL_SBI_VERBOSE)
1340                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1341         RETURN(err);
1342 } /* ll_fill_super */
1343
1344 void ll_put_super(struct super_block *sb)
1345 {
1346         struct config_llog_instance cfg, params_cfg;
1347         struct obd_device *obd;
1348         struct lustre_sb_info *lsi = s2lsi(sb);
1349         struct ll_sb_info *sbi = ll_s2sbi(sb);
1350         char *profilenm = get_profile_name(sb);
1351         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1352         long ccc_count;
1353         int next, force = 1, rc = 0;
1354         ENTRY;
1355
1356         if (IS_ERR(sbi))
1357                 GOTO(out_no_sbi, 0);
1358
1359         /* Should replace instance_id with something better for ASLR */
1360         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1361                profilenm, cfg_instance, sb);
1362
1363         cfg.cfg_instance = cfg_instance;
1364         lustre_end_log(sb, profilenm, &cfg);
1365
1366         params_cfg.cfg_instance = cfg_instance;
1367         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1368
1369         if (sbi->ll_md_exp) {
1370                 obd = class_exp2obd(sbi->ll_md_exp);
1371                 if (obd)
1372                         force = obd->obd_force;
1373         }
1374
1375         /* Wait for unstable pages to be committed to stable storage */
1376         if (force == 0) {
1377                 rc = l_wait_event_abortable(
1378                         sbi->ll_cache->ccc_unstable_waitq,
1379                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1380         }
1381
1382         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1383         if (force == 0 && rc != -ERESTARTSYS)
1384                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1385
1386         /* We need to set force before the lov_disconnect in
1387          * lustre_common_put_super, since lov_disconnect cleans up the OSCs too.
1388          */
1389         if (force) {
1390                 next = 0;
1391                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1392                                                      &next)) != NULL) {
1393                         obd->obd_force = force;
1394                 }
1395         }
1396
1397         if (sbi->ll_client_common_fill_super_succeeded) {
1398                 /* Only if client_common_fill_super succeeded */
1399                 client_common_put_super(sb);
1400         }
1401
1402         next = 0;
1403         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1404                 class_manual_cleanup(obd);
1405
1406         if (sbi->ll_flags & LL_SBI_VERBOSE)
1407                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1408
1409         if (profilenm)
1410                 class_del_profile(profilenm);
1411
1412 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1413         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1414                 bdi_destroy(&lsi->lsi_bdi);
1415                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1416         }
1417 #endif
1418
1419         ll_free_sbi(sb);
1420         lsi->lsi_llsbi = NULL;
1421 out_no_sbi:
1422         lustre_common_put_super(sb);
1423
1424         cl_env_cache_purge(~0);
1425
1426         EXIT;
1427 } /* ll_put_super */
1428
1429 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1430 {
1431         struct inode *inode = NULL;
1432
1433         /* NOTE: we depend on atomic igrab() -bzzz */
1434         lock_res_and_lock(lock);
1435         if (lock->l_resource->lr_lvb_inode) {
1436                 struct ll_inode_info * lli;
1437                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1438                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1439                         inode = igrab(lock->l_resource->lr_lvb_inode);
1440                 } else {
1441                         inode = lock->l_resource->lr_lvb_inode;
1442                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1443                                          D_WARNING, lock,
1444                                          "lr_lvb_inode %p is bogus: magic %08x",
1445                                          lock->l_resource->lr_lvb_inode,
1446                                          lli->lli_inode_magic);
1447                         inode = NULL;
1448                 }
1449         }
1450         unlock_res_and_lock(lock);
1451         return inode;
1452 }
1453
1454 void ll_dir_clear_lsm_md(struct inode *inode)
1455 {
1456         struct ll_inode_info *lli = ll_i2info(inode);
1457
1458         LASSERT(S_ISDIR(inode->i_mode));
1459
1460         if (lli->lli_lsm_md) {
1461                 lmv_free_memmd(lli->lli_lsm_md);
1462                 lli->lli_lsm_md = NULL;
1463         }
1464
1465         if (lli->lli_default_lsm_md) {
1466                 lmv_free_memmd(lli->lli_default_lsm_md);
1467                 lli->lli_default_lsm_md = NULL;
1468         }
1469 }
1470
1471 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1472                                       const struct lu_fid *fid,
1473                                       struct lustre_md *md)
1474 {
1475         struct ll_sb_info       *sbi = ll_s2sbi(sb);
1476         struct mdt_body         *body = md->body;
1477         struct inode            *inode;
1478         ino_t                   ino;
1479         ENTRY;
1480
1481         ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1482         inode = iget_locked(sb, ino);
1483         if (inode == NULL) {
1484                 CERROR("%s: failed to get simple inode "DFID": rc = -ENOENT\n",
1485                        sbi->ll_fsname, PFID(fid));
1486                 RETURN(ERR_PTR(-ENOENT));
1487         }
1488
1489         if (inode->i_state & I_NEW) {
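                     /* only a freshly allocated inode needs initialization here;
                      * an already initialized slave inode is returned as is
                      */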
1490                 struct ll_inode_info *lli = ll_i2info(inode);
1491                 struct lmv_stripe_md *lsm = md->lmv;
1492
1493                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1494                                 (body->mbo_mode & S_IFMT);
1495                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1496                          PFID(fid));
1497
1498                 inode->i_mtime.tv_sec = 0;
1499                 inode->i_atime.tv_sec = 0;
1500                 inode->i_ctime.tv_sec = 0;
1501                 inode->i_rdev = 0;
1502
1503 #ifdef HAVE_BACKING_DEV_INFO
1504                 /* initializing backing dev info. */
1505                 inode->i_mapping->backing_dev_info =
1506                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1507 #endif
1508                 inode->i_op = &ll_dir_inode_operations;
1509                 inode->i_fop = &ll_dir_operations;
1510                 lli->lli_fid = *fid;
1511                 ll_lli_init(lli);
1512
1513                 LASSERT(lsm != NULL);
1514                 /* master object FID */
1515                 lli->lli_pfid = body->mbo_fid1;
1516                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1517                        lli, PFID(fid), PFID(&lli->lli_pfid));
1518                 unlock_new_inode(inode);
1519         }
1520
1521         RETURN(inode);
1522 }
1523
1524 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1525 {
1526         struct lu_fid *fid;
1527         struct lmv_stripe_md *lsm = md->lmv;
1528         struct ll_inode_info *lli = ll_i2info(inode);
1529         int i;
1530
1531         LASSERT(lsm != NULL);
1532
1533         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1534                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1535         lsm_md_dump(D_INODE, lsm);
1536
1537         if (!lmv_dir_striped(lsm))
1538                 goto out;
1539
1540         /* XXX sigh, this lsm_root initialization should be in the
1541          * LMV layer, but it needs ll_iget(), so we put it
1542          * here for now. */
1543         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1544                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1545                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1546
1547                 if (!fid_is_sane(fid))
1548                         continue;
1549
1550                 /* Unfortunately ll_iget will call ll_update_inode,
1551                  * where the initialization of the slave inode is slightly
1552                  * different, so it resets lsm_md to NULL to avoid
1553                  * initializing the lsm for the slave inode. */
1554                 lsm->lsm_md_oinfo[i].lmo_root =
1555                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1556                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1557                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1558
1559                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
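                             /* drop the slave inode references already taken */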
1560                         while (i-- > 0) {
1561                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1562                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1563                         }
1564                         return rc;
1565                 }
1566         }
1567 out:
1568         lli->lli_lsm_md = lsm;
1569
1570         return 0;
1571 }
1572
1573 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1574 {
1575         struct ll_inode_info *lli = ll_i2info(inode);
1576
1577         if (!md->default_lmv) {
1578                 /* clear default lsm */
1579                 if (lli->lli_default_lsm_md) {
1580                         down_write(&lli->lli_lsm_sem);
1581                         if (lli->lli_default_lsm_md) {
1582                                 lmv_free_memmd(lli->lli_default_lsm_md);
1583                                 lli->lli_default_lsm_md = NULL;
1584                         }
1585                         up_write(&lli->lli_lsm_sem);
1586                 }
1587         } else if (lli->lli_default_lsm_md) {
1588                 /* update default lsm if it changes */
1589                 down_read(&lli->lli_lsm_sem);
1590                 if (lli->lli_default_lsm_md &&
1591                     !lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1592                         up_read(&lli->lli_lsm_sem);
1593                         down_write(&lli->lli_lsm_sem);
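                             /* re-check under the write lock: a racing thread may
                              * have freed the default lsm while the semaphore was
                              * dropped
                              */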
1594                         if (lli->lli_default_lsm_md)
1595                                 lmv_free_memmd(lli->lli_default_lsm_md);
1596                         lli->lli_default_lsm_md = md->default_lmv;
1597                         lsm_md_dump(D_INODE, md->default_lmv);
1598                         md->default_lmv = NULL;
1599                         up_write(&lli->lli_lsm_sem);
1600                 } else {
1601                         up_read(&lli->lli_lsm_sem);
1602                 }
1603         } else {
1604                 /* init default lsm */
1605                 down_write(&lli->lli_lsm_sem);
1606                 lli->lli_default_lsm_md = md->default_lmv;
1607                 lsm_md_dump(D_INODE, md->default_lmv);
1608                 md->default_lmv = NULL;
1609                 up_write(&lli->lli_lsm_sem);
1610         }
1611 }
1612
1613 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1614 {
1615         struct ll_inode_info *lli = ll_i2info(inode);
1616         struct lmv_stripe_md *lsm = md->lmv;
1617         struct cl_attr  *attr;
1618         int rc = 0;
1619
1620         ENTRY;
1621
1622         LASSERT(S_ISDIR(inode->i_mode));
1623         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1624                PFID(ll_inode2fid(inode)));
1625
1626         /* update default LMV */
1627         if (md->default_lmv)
1628                 ll_update_default_lsm_md(inode, md);
1629
1630         /*
1631          * No striping information in the request: the lustre_md from the
1632          * request does not include the stripe EA, see ll_md_setattr()
1633          */
1634         if (!lsm)
1635                 RETURN(0);
1636
1637         /*
1638          * Normally the dir layout doesn't change, so only take the read
1639          * lock to check it, to avoid blocking other MD operations.
1640          */
1641         down_read(&lli->lli_lsm_sem);
1642
1643         /* a concurrent lookup already initialized the lsm and it is unchanged */
1644         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1645                 GOTO(unlock, rc = 0);
1646
1647         /* If the dir layout doesn't match, check whether the version has
1648          * increased, which means the layout changed; this happens in dir
1649          * split/merge and lfsck.
1650          *
1651          * foreign LMV should not change.
1652          */
1653         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1654             lsm->lsm_md_layout_version <=
1655             lli->lli_lsm_md->lsm_md_layout_version) {
1656                 CERROR("%s: "DFID" dir layout mismatch:\n",
1657                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1658                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1659                 lsm_md_dump(D_ERROR, lsm);
1660                 GOTO(unlock, rc = -EINVAL);
1661         }
1662
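             /* The layout changed (or no lsm was cached yet): upgrade to the
              * write lock so the cached lsm can be replaced.
              */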
1663         up_read(&lli->lli_lsm_sem);
1664         down_write(&lli->lli_lsm_sem);
1665         /* clear existing lsm */
1666         if (lli->lli_lsm_md) {
1667                 lmv_free_memmd(lli->lli_lsm_md);
1668                 lli->lli_lsm_md = NULL;
1669         }
1670
1671         rc = ll_init_lsm_md(inode, md);
1672         up_write(&lli->lli_lsm_sem);
1673
1674         if (rc)
1675                 RETURN(rc);
1676
1677         /* Set md->lmv to NULL so that the subsequent freeing of the
1678          * lustre_md does not free this lsm.
1679          */
1680         md->lmv = NULL;
1681
1682         /* md_merge_attr() may take a long time; since the lsm is already
1683          * set, switch to the read lock.
1684          */
1685         down_read(&lli->lli_lsm_sem);
1686
1687         if (!lmv_dir_striped(lli->lli_lsm_md))
1688                 GOTO(unlock, rc = 0);
1689
1690         OBD_ALLOC_PTR(attr);
1691         if (!attr)
1692                 GOTO(unlock, rc = -ENOMEM);
1693
1694         /* validate the lsm */
1695         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1696                            ll_md_blocking_ast);
1697         if (!rc) {
1698                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1699                         md->body->mbo_nlink = attr->cat_nlink;
1700                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1701                         md->body->mbo_size = attr->cat_size;
1702                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1703                         md->body->mbo_atime = attr->cat_atime;
1704                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1705                         md->body->mbo_ctime = attr->cat_ctime;
1706                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1707                         md->body->mbo_mtime = attr->cat_mtime;
1708         }
1709
1710         OBD_FREE_PTR(attr);
1711         GOTO(unlock, rc);
1712 unlock:
1713         up_read(&lli->lli_lsm_sem);
1714
1715         return rc;
1716 }
1717
1718 void ll_clear_inode(struct inode *inode)
1719 {
1720         struct ll_inode_info *lli = ll_i2info(inode);
1721         struct ll_sb_info *sbi = ll_i2sbi(inode);
1722
1723         ENTRY;
1724
1725         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1726                PFID(ll_inode2fid(inode)), inode);
1727
1728         if (S_ISDIR(inode->i_mode)) {
1729                 /* these should have been cleared in ll_file_release */
1730                 LASSERT(lli->lli_opendir_key == NULL);
1731                 LASSERT(lli->lli_sai == NULL);
1732                 LASSERT(lli->lli_opendir_pid == 0);
1733         } else {
1734                 pcc_inode_free(inode);
1735         }
1736
1737         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1738
1739         LASSERT(!lli->lli_open_fd_write_count);
1740         LASSERT(!lli->lli_open_fd_read_count);
1741         LASSERT(!lli->lli_open_fd_exec_count);
1742
1743         if (lli->lli_mds_write_och)
1744                 ll_md_real_close(inode, FMODE_WRITE);
1745         if (lli->lli_mds_exec_och)
1746                 ll_md_real_close(inode, FMODE_EXEC);
1747         if (lli->lli_mds_read_och)
1748                 ll_md_real_close(inode, FMODE_READ);
1749
1750         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1751                 OBD_FREE(lli->lli_symlink_name,
1752                          strlen(lli->lli_symlink_name) + 1);
1753                 lli->lli_symlink_name = NULL;
1754         }
1755
1756         ll_xattr_cache_destroy(inode);
1757
1758         forget_all_cached_acls(inode);
1759         lli_clear_acl(lli);
1760         lli->lli_inode_magic = LLI_INODE_DEAD;
1761
1762         if (S_ISDIR(inode->i_mode))
1763                 ll_dir_clear_lsm_md(inode);
1764         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1765                 LASSERT(list_empty(&lli->lli_agl_list));
1766
1767         /*
1768          * XXX This has to be done before lsm is freed below, because
1769          * cl_object still uses inode lsm.
1770          */
1771         cl_inode_fini(inode);
1772
1773         llcrypt_put_encryption_info(inode);
1774
1775         EXIT;
1776 }
1777
1778 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1779 {
1780         struct lustre_md md;
1781         struct inode *inode = dentry->d_inode;
1782         struct ll_sb_info *sbi = ll_i2sbi(inode);
1783         struct ptlrpc_request *request = NULL;
1784         int rc, ia_valid;
1785         ENTRY;
1786
1787         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1788                                      LUSTRE_OPC_ANY, NULL);
1789         if (IS_ERR(op_data))
1790                 RETURN(PTR_ERR(op_data));
1791
1792         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1793         if (rc) {
1794                 ptlrpc_req_finished(request);
1795                 if (rc == -ENOENT) {
1796                         clear_nlink(inode);
1797                         /* Unlinked special device node? Or just a race?
1798                          * Pretend we did everything. */
1799                         if (!S_ISREG(inode->i_mode) &&
1800                             !S_ISDIR(inode->i_mode)) {
1801                                 ia_valid = op_data->op_attr.ia_valid;
1802                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1803                                 rc = simple_setattr(dentry, &op_data->op_attr);
1804                                 op_data->op_attr.ia_valid = ia_valid;
1805                         }
1806                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1807                         CERROR("md_setattr fails: rc = %d\n", rc);
1808                 }
1809                 RETURN(rc);
1810         }
1811
1812         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1813                               sbi->ll_md_exp, &md);
1814         if (rc) {
1815                 ptlrpc_req_finished(request);
1816                 RETURN(rc);
1817         }
1818
1819         ia_valid = op_data->op_attr.ia_valid;
1820         /* The inode size will be set in ll_setattr_ost(); it can't be done
1821          * now since the dirty cache is not cleared yet. */
1822         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1823         if (S_ISREG(inode->i_mode))
1824                 inode_lock(inode);
1825         rc = simple_setattr(dentry, &op_data->op_attr);
1826         if (S_ISREG(inode->i_mode))
1827                 inode_unlock(inode);
1828         op_data->op_attr.ia_valid = ia_valid;
1829
1830         rc = ll_update_inode(inode, &md);
1831         ptlrpc_req_finished(request);
1832
1833         RETURN(rc);
1834 }
1835
1836 /**
1837  * Zero portion of page that is part of @inode.
1838  * This implies, if necessary:
1839  * - taking cl_lock on range corresponding to concerned page
1840  * - grabbing vm page
1841  * - associating cl_page
1842  * - proceeding to clio read
1843  * - zeroing range in page
1844  * - proceeding to cl_page flush
1845  * - releasing cl_lock
1846  *
1847  * \param[in] inode     inode
1848  * \param[in] index     page index
1849  * \param[in] offset    offset in page to start zero from
1850  * \param[in] len       len to zero
1851  *
1852  * \retval 0            on success
1853  * \retval negative     errno on failure
1854  */
1855 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1856                     unsigned len)
1857 {
1858         struct ll_inode_info *lli = ll_i2info(inode);
1859         struct cl_object *clob = lli->lli_clob;
1860         __u16 refcheck;
1861         struct lu_env *env = NULL;
1862         struct cl_io *io = NULL;
1863         struct cl_page *clpage = NULL;
1864         struct page *vmpage = NULL;
1865         unsigned from = index << PAGE_SHIFT;
1866         struct cl_lock *lock = NULL;
1867         struct cl_lock_descr *descr = NULL;
1868         struct cl_2queue *queue = NULL;
1869         struct cl_sync_io *anchor = NULL;
1870         bool holdinglock = false;
1871         bool lockedbymyself = true;
1872         int rc;
1873
1874         ENTRY;
1875
1876         env = cl_env_get(&refcheck);
1877         if (IS_ERR(env))
1878                 RETURN(PTR_ERR(env));
1879
1880         io = vvp_env_thread_io(env);
1881         io->ci_obj = clob;
1882         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1883         if (rc)
1884                 GOTO(putenv, rc);
1885
1886         lock = vvp_env_lock(env);
1887         descr = &lock->cll_descr;
1888         descr->cld_obj   = io->ci_obj;
1889         descr->cld_start = cl_index(io->ci_obj, from);
1890         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1891         descr->cld_mode  = CLM_WRITE;
1892         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1893
1894         /* request lock for page */
1895         rc = cl_lock_request(env, io, lock);
1896         /* -ECANCELED indicates a matching lock with a different extent
1897          * was already present, and -EEXIST indicates a matching lock
1898          * on exactly the same extent was already present.
1899          * In both cases it means we are covered.
1900          */
1901         if (rc == -ECANCELED || rc == -EEXIST)
1902                 rc = 0;
1903         else if (rc < 0)
1904                 GOTO(iofini, rc);
1905         else
1906                 holdinglock = true;
1907
1908         /* grab page */
1909         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1910         if (vmpage == NULL)
1911                 GOTO(rellock, rc = -EOPNOTSUPP);
1912
1913         if (!PageDirty(vmpage)) {
1914                 /* associate cl_page */
1915                 clpage = cl_page_find(env, clob, vmpage->index,
1916                                       vmpage, CPT_CACHEABLE);
1917                 if (IS_ERR(clpage))
1918                         GOTO(pagefini, rc = PTR_ERR(clpage));
1919
1920                 cl_page_assume(env, io, clpage);
1921         }
1922
1923         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1924             !PageWriteback(vmpage)) {
1925                 /* read page */
1926                 /* set PagePrivate2 to detect special case of empty page
1927                  * in osc_brw_fini_request()
1928                  */
1929                 SetPagePrivate2(vmpage);
1930                 rc = ll_io_read_page(env, io, clpage, NULL);
1931                 if (!PagePrivate2(vmpage))
1932                         /* PagePrivate2 was cleared in osc_brw_fini_request()
1933                          * meaning we read an empty page. In this case, in order
1934                          * to avoid allocating an unnecessary block in a truncated
1935                          * file, we must not zero and write as below. Subsequent
1936                          * server-side truncate will handle things correctly.
1937                          */
1938                         GOTO(clpfini, rc = 0);
1939                 ClearPagePrivate2(vmpage);
1940                 if (rc)
1941                         GOTO(clpfini, rc);
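                     /* the read may have dropped the page lock; try to re-take it
                      * so the cleanup path knows whether this thread still owns it
                      */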
1942                 lockedbymyself = trylock_page(vmpage);
1943                 cl_page_assume(env, io, clpage);
1944         }
1945
1946         /* zero range in page */
1947         zero_user(vmpage, offset, len);
1948
1949         if (holdinglock && clpage) {
1950                 /* explicitly write newly modified page */
1951                 queue = &io->ci_queue;
1952                 cl_2queue_init(queue);
1953                 anchor = &vvp_env_info(env)->vti_anchor;
1954                 cl_sync_io_init(anchor, 1);
1955                 clpage->cp_sync_io = anchor;
1956                 cl_2queue_add(queue, clpage);
1957                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1958                 if (rc)
1959                         GOTO(queuefini1, rc);
1960                 rc = cl_sync_io_wait(env, anchor, 0);
1961                 if (rc)
1962                         GOTO(queuefini2, rc);
1963                 cl_page_assume(env, io, clpage);
1964
1965 queuefini2:
1966                 cl_2queue_discard(env, io, queue);
1967 queuefini1:
1968                 cl_2queue_disown(env, io, queue);
1969                 cl_2queue_fini(env, queue);
1970         }
1971
1972 clpfini:
1973         if (clpage)
1974                 cl_page_put(env, clpage);
1975 pagefini:
1976         if (lockedbymyself) {
1977                 unlock_page(vmpage);
1978                 put_page(vmpage);
1979         }
1980 rellock:
1981         if (holdinglock)
1982                 cl_lock_release(env, lock);
1983 iofini:
1984         cl_io_fini(env, io);
1985 putenv:
1986         if (env)
1987                 cl_env_put(env, &refcheck);
1988
1989         RETURN(rc);
1990 }
1991
1992 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1993  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1994  * keep these values until such a time that objects are allocated for it.
1995  * We do the MDS operations first, as it is checking permissions for us.
1996  * We don't do the MDS RPC if there is nothing that we want to store there;
1997  * otherwise there is no harm in updating mtime/atime on the MDS if we are
1998  * going to do an RPC anyway.
1999  *
2000  * If we are doing a truncate, we will send the mtime and ctime updates
2001  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2002  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2003  * at the same time.
2004  *
2005  * In the case of an HSM import, we only set attributes on the MDS.
2006  */
2007 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2008                    enum op_xvalid xvalid, bool hsm_import)
2009 {
2010         struct inode *inode = dentry->d_inode;
2011         struct ll_inode_info *lli = ll_i2info(inode);
2012         struct md_op_data *op_data = NULL;
2013         ktime_t kstart = ktime_get();
2014         int rc = 0;
2015
2016         ENTRY;
2017
2018         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2019                "valid %x, hsm_import %d\n",
2020                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2021                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2022                hsm_import);
2023
2024         if (attr->ia_valid & ATTR_SIZE) {
2025                 /* Check new size against VFS/VM file size limit and rlimit */
2026                 rc = inode_newsize_ok(inode, attr->ia_size);
2027                 if (rc)
2028                         RETURN(rc);
2029
2030                 /* The maximum Lustre file size is variable, based on the
2031                  * OST maximum object size and number of stripes.  This
2032                  * needs another check in addition to the VFS check above. */
2033                 if (attr->ia_size > ll_file_maxbytes(inode)) {
2034                         CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
2035                                PFID(&lli->lli_fid), attr->ia_size,
2036                                ll_file_maxbytes(inode));
2037                         RETURN(-EFBIG);
2038                 }
2039
2040                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2041         }
2042
2043         /* POSIX: check before the ATTR_*TIME_SET flags are set below (from inode_change_ok()) */
2044         if (attr->ia_valid & TIMES_SET_FLAGS) {
2045                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2046                     !capable(CAP_FOWNER))
2047                         RETURN(-EPERM);
2048         }
2049
2050         /* We mark all of the fields "set" so MDS/OST does not re-set them */
2051         if (!(xvalid & OP_XVALID_CTIME_SET) &&
2052              (attr->ia_valid & ATTR_CTIME)) {
2053                 attr->ia_ctime = current_time(inode);
2054                 xvalid |= OP_XVALID_CTIME_SET;
2055         }
2056         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2057             (attr->ia_valid & ATTR_ATIME)) {
2058                 attr->ia_atime = current_time(inode);
2059                 attr->ia_valid |= ATTR_ATIME_SET;
2060         }
2061         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2062             (attr->ia_valid & ATTR_MTIME)) {
2063                 attr->ia_mtime = current_time(inode);
2064                 attr->ia_valid |= ATTR_MTIME_SET;
2065         }
2066
2067         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2068                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2069                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2070                        ktime_get_real_seconds());
2071
2072         if (S_ISREG(inode->i_mode))
2073                 inode_unlock(inode);
2074
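             /* For regular files this function is entered with the inode lock
              * held; it is dropped across the RPCs below and re-acquired in the
              * 'out' path.
              */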
2075         /* We always do an MDS RPC, even if we're only changing the size;
2076          * only the MDS knows whether truncate() should fail with -ETXTBSY */
2077
2078         OBD_ALLOC_PTR(op_data);
2079         if (op_data == NULL)
2080                 GOTO(out, rc = -ENOMEM);
2081
2082         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2083                 /* If we are changing file size, file content is
2084                  * modified, flag it.
2085                  */
2086                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2087                 op_data->op_bias |= MDS_DATA_MODIFIED;
2088                 clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
2089         }
2090
2091         if (attr->ia_valid & ATTR_FILE) {
2092                 struct ll_file_data *fd = attr->ia_file->private_data;
2093
2094                 if (fd->fd_lease_och)
2095                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2096         }
2097
2098         op_data->op_attr = *attr;
2099         op_data->op_xvalid = xvalid;
2100
2101         rc = ll_md_setattr(dentry, op_data);
2102         if (rc)
2103                 GOTO(out, rc);
2104
2105         if (!S_ISREG(inode->i_mode) || hsm_import)
2106                 GOTO(out, rc = 0);
2107
2108         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2109                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2110             xvalid & OP_XVALID_CTIME_SET) {
2111                 bool cached = false;
2112
2113                 rc = pcc_inode_setattr(inode, attr, &cached);
2114                 if (cached) {
2115                         if (rc) {
2116                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2117                                        "rc = %d\n",
2118                                        ll_i2sbi(inode)->ll_fsname,
2119                                        PFID(&lli->lli_fid), rc);
2120                                 GOTO(out, rc);
2121                         }
2122                 } else {
2123                         unsigned int flags = 0;
2124
2125                         /* For truncate and utimes that send attributes to the
2126                          * OSTs, setting mtime/atime to the past is performed
2127                          * under a PW [0:EOF] extent lock (new_size:EOF for
2128                          * truncate). It may seem excessive to send mtime/atime
2129                          * updates to the OSTs when not setting times to the
2130                          * past, but it is necessary due to possible time
2131                          * de-synchronization between the MDT inode and OST objects
2132                          */
2133                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2134                             attr->ia_valid & ATTR_SIZE) {
2135                                 xvalid |= OP_XVALID_FLAGS;
2136                                 flags = LUSTRE_ENCRYPT_FL;
2137                                 /* A call to ll_io_zero_page() is not needed when
2138                                  * truncating on a PAGE_SIZE boundary, because
2139                                  * whole pages will be wiped.
2140                                  * In the case of Direct IO, all we need is to
2141                                  * set the new size.
2142                                  */
2143                                 if (attr->ia_size & ~PAGE_MASK &&
2144                                     !(attr->ia_valid & ATTR_FILE &&
2145                                       attr->ia_file->f_flags & O_DIRECT)) {
2146                                         pgoff_t offset =
2147                                                 attr->ia_size & (PAGE_SIZE - 1);
2148
2149                                         rc = ll_io_zero_page(inode,
2150                                                     attr->ia_size >> PAGE_SHIFT,
2151                                                     offset, PAGE_SIZE - offset);
2152                                         if (rc)
2153                                                 GOTO(out, rc);
2154                                 }
2155                         }
2156                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2157                 }
2158         }
2159
2160         /* If the file was restored, it needs to set the dirty flag.
2161          *
2162          * We've already sent MDS_DATA_MODIFIED flag in
2163          * ll_md_setattr() for truncate. However, the MDT refuses to
2164          * set the HS_DIRTY flag on released files, so we have to set
2165          * it again if the file has been restored. Please check how
2166          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2167          *
2168          * Please notice that if the file is not released, the previous
2169          * MDS_DATA_MODIFIED has taken effect and usually
2170          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2171          * This way we can save an RPC for the common open + truncate
2172          * operation. */
2173         if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
2174                 struct hsm_state_set hss = {
2175                         .hss_valid = HSS_SETMASK,
2176                         .hss_setmask = HS_DIRTY,
2177                 };
2178                 int rc2;
2179
2180                 rc2 = ll_hsm_state_set(inode, &hss);
2181                 /* A truncate and a write can happen at the same time, so the
2182                  * file can be marked modified even though it was not restored
2183                  * from the released state; in that case ll_hsm_state_set() is
2184                  * not applicable for the file and rc2 < 0 is normal.
2185                  */
2186                 if (rc2 < 0)
2187                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2188                                PFID(ll_inode2fid(inode)), rc2);
2189         }
2190
2191         EXIT;
2192 out:
2193         if (op_data != NULL)
2194                 ll_finish_md_op_data(op_data);
2195
2196         if (S_ISREG(inode->i_mode)) {
2197                 inode_lock(inode);
2198                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2199                         inode_dio_wait(inode);
2200                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2201                  * flag.  ll_update_inode() (called from ll_md_setattr()) clears
2202                  * inode flags, so there is a gap where S_NOSEC is not set.
2203                  * This can cause a writer to take the i_mutex unnecessarily,
2204                  * but this is safe to do and should be rare. */
2205                 inode_has_no_xattr(inode);
2206         }
2207
2208         if (!rc)
2209                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2210                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2211                                    ktime_us_delta(ktime_get(), kstart));
2212
2213         return rc;
2214 }
2215
2216 int ll_setattr(struct dentry *de, struct iattr *attr)
2217 {
2218         int mode = de->d_inode->i_mode;
2219         enum op_xvalid xvalid = 0;
2220         int rc;
2221
2222         rc = llcrypt_prepare_setattr(de, attr);
2223         if (rc)
2224                 return rc;
2225
2226         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2227                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2228                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2229
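             /* When size and mode are changed together and the change drops the
              * setuid/setgid bits, add ATTR_FORCE so the mode update is not
              * rejected by the permission checks.
              */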
2230         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2231                                (ATTR_SIZE|ATTR_MODE)) &&
2232             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2233              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2234               !(attr->ia_mode & S_ISGID))))
2235                 attr->ia_valid |= ATTR_FORCE;
2236
2237         if ((attr->ia_valid & ATTR_MODE) &&
2238             (mode & S_ISUID) &&
2239             !(attr->ia_mode & S_ISUID) &&
2240             !(attr->ia_valid & ATTR_KILL_SUID))
2241                 attr->ia_valid |= ATTR_KILL_SUID;
2242
2243         if ((attr->ia_valid & ATTR_MODE) &&
2244             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2245             !(attr->ia_mode & S_ISGID) &&
2246             !(attr->ia_valid & ATTR_KILL_SGID))
2247                 attr->ia_valid |= ATTR_KILL_SGID;
2248
2249         return ll_setattr_raw(de, attr, xvalid, false);
2250 }
2251
2252 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2253                        u32 flags)
2254 {
2255         struct obd_statfs obd_osfs = { 0 };
2256         time64_t max_age;
2257         int rc;
2258
2259         ENTRY;
2260         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2261
2262         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2263                 flags |= OBD_STATFS_NODELAY;
2264
2265         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2266         if (rc)
2267                 RETURN(rc);
2268
2269         osfs->os_type = LL_SUPER_MAGIC;
2270
2271         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2272               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2273
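             /* if the MDT already returned summed statistics for the whole
              * filesystem, there is no need for a separate OST statfs
              */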
2274         if (osfs->os_state & OS_STATFS_SUM)
2275                 GOTO(out, rc);
2276
2277         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2278         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2279                 GOTO(out, rc = 0);
2280
2281         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2282                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2283                obd_osfs.os_files);
2284
2285         osfs->os_bsize = obd_osfs.os_bsize;
2286         osfs->os_blocks = obd_osfs.os_blocks;
2287         osfs->os_bfree = obd_osfs.os_bfree;
2288         osfs->os_bavail = obd_osfs.os_bavail;
2289
2290         /* If we have _some_ OSTs, but don't have as many free objects on the
2291          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2292          * to compensate, so that the "inodes in use" number is correct.
2293          * This should be kept in sync with lod_statfs() behaviour.
2294          */
2295         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2296                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2297                                  obd_osfs.os_ffree;
2298                 osfs->os_ffree = obd_osfs.os_ffree;
2299         }
2300
2301 out:
2302         RETURN(rc);
2303 }
2304
2305 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2306 {
2307         struct if_quotactl qctl = {
2308                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2309                 .qc_type = PRJQUOTA,
2310                 .qc_valid = QC_GENERAL,
2311         };
2312         u64 limit, curblock;
2313         int ret;
2314
2315         qctl.qc_id = ll_i2info(inode)->lli_projid;
2316         ret = quotactl_ioctl(ll_i2sbi(inode), &qctl);
2317         if (ret) {
2318                 /* ignore errors if project ID does not have
2319                  * a quota limit or the feature is unsupported.
2320                  */
2321                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2322                         ret = 0;
2323                 return ret;
2324         }
2325
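             /* block quota limits are reported in KiB (hence the multiply by
              * 1024); convert them to f_bsize units before clamping the statfs
              * numbers
              */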
2326         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2327                  qctl.qc_dqblk.dqb_bsoftlimit :
2328                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2329         if (limit && sfs->f_blocks > limit) {
2330                 curblock = (qctl.qc_dqblk.dqb_curspace +
2331                                 sfs->f_bsize - 1) / sfs->f_bsize;
2332                 sfs->f_blocks = limit;
2333                 sfs->f_bfree = sfs->f_bavail =
2334                         (sfs->f_blocks > curblock) ?
2335                         (sfs->f_blocks - curblock) : 0;
2336         }
2337
2338         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2339                 qctl.qc_dqblk.dqb_isoftlimit :
2340                 qctl.qc_dqblk.dqb_ihardlimit;
2341         if (limit && sfs->f_files > limit) {
2342                 sfs->f_files = limit;
2343                 sfs->f_ffree = (sfs->f_files >
2344                         qctl.qc_dqblk.dqb_curinodes) ?
2345                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2346         }
2347
2348         return 0;
2349 }
2350
2351 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2352 {
2353         struct super_block *sb = de->d_sb;
2354         struct obd_statfs osfs;
2355         __u64 fsid = huge_encode_dev(sb->s_dev);
2356         ktime_t kstart = ktime_get();
2357         int rc;
2358
2359         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2360
2361         /* Some amount of caching on the client is allowed */
2362         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2363         if (rc)
2364                 return rc;
2365
2366         statfs_unpack(sfs, &osfs);
2367
2368         /* We need to downshift for all 32-bit kernels, because we can't
2369          * tell if the kernel is being called via sys_statfs64() or not.
2370          * Stop before overflowing f_bsize; at that point it is better
2371          * to just risk EOVERFLOW if the caller is using the old sys_statfs(). */
2372         if (sizeof(long) < 8) {
2373                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2374                         sfs->f_bsize <<= 1;
2375
2376                         osfs.os_blocks >>= 1;
2377                         osfs.os_bfree >>= 1;
2378                         osfs.os_bavail >>= 1;
2379                 }
2380         }
2381
2382         sfs->f_blocks = osfs.os_blocks;
2383         sfs->f_bfree = osfs.os_bfree;
2384         sfs->f_bavail = osfs.os_bavail;
2385         sfs->f_fsid.val[0] = (__u32)fsid;
2386         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2387         if (ll_i2info(de->d_inode)->lli_projid)
2388                 return ll_statfs_project(de->d_inode, sfs);
2389
2390         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2391                            ktime_us_delta(ktime_get(), kstart));
2392
2393         return 0;
2394 }
2395
2396 void ll_inode_size_lock(struct inode *inode)
2397 {
2398         struct ll_inode_info *lli;
2399
2400         LASSERT(!S_ISDIR(inode->i_mode));
2401
2402         lli = ll_i2info(inode);
2403         mutex_lock(&lli->lli_size_mutex);
2404 }
2405
2406 void ll_inode_size_unlock(struct inode *inode)
2407 {
2408         struct ll_inode_info *lli;
2409
2410         lli = ll_i2info(inode);
2411         mutex_unlock(&lli->lli_size_mutex);
2412 }
2413
2414 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
2415 {
2416         /* do not clear encryption flag */
2417         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2418         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2419         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2420                 set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2421         else
2422                 clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2423 }
2424
2425 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2426 {
2427         struct ll_inode_info *lli = ll_i2info(inode);
2428         struct mdt_body *body = md->body;
2429         struct ll_sb_info *sbi = ll_i2sbi(inode);
2430         int rc = 0;
2431
2432         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2433                 rc = cl_file_inode_init(inode, md);
2434                 if (rc)
2435                         return rc;
2436         }
2437
2438         if (S_ISDIR(inode->i_mode)) {
2439                 rc = ll_update_lsm_md(inode, md);
2440                 if (rc != 0)
2441                         return rc;
2442         }
2443
2444         if (body->mbo_valid & OBD_MD_FLACL)
2445                 lli_replace_acl(lli, md);
2446
2447         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
2448                                         sbi->ll_flags & LL_SBI_32BIT_API);
2449         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2450
2451         if (body->mbo_valid & OBD_MD_FLATIME) {
2452                 if (body->mbo_atime > inode->i_atime.tv_sec)
2453                         inode->i_atime.tv_sec = body->mbo_atime;
2454                 lli->lli_atime = body->mbo_atime;
2455         }
2456
2457         if (body->mbo_valid & OBD_MD_FLMTIME) {
2458                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2459                         CDEBUG(D_INODE,
2460                                "setting ino %lu mtime from %lld to %llu\n",
2461                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2462                                body->mbo_mtime);
2463                         inode->i_mtime.tv_sec = body->mbo_mtime;
2464                 }
2465                 lli->lli_mtime = body->mbo_mtime;
2466         }
2467
2468         if (body->mbo_valid & OBD_MD_FLCTIME) {
2469                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2470                         inode->i_ctime.tv_sec = body->mbo_ctime;
2471                 lli->lli_ctime = body->mbo_ctime;
2472         }
2473
2474         if (body->mbo_valid & OBD_MD_FLBTIME)
2475                 lli->lli_btime = body->mbo_btime;
2476
2477         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2478         if (body->mbo_valid & OBD_MD_FLFLAGS)
2479                 ll_update_inode_flags(inode, body->mbo_flags);
2480         if (body->mbo_valid & OBD_MD_FLMODE)
2481                 inode->i_mode = (inode->i_mode & S_IFMT) |
2482                                 (body->mbo_mode & ~S_IFMT);
2483
2484         if (body->mbo_valid & OBD_MD_FLTYPE)
2485                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2486                                 (body->mbo_mode & S_IFMT);
2487
2488         LASSERT(inode->i_mode != 0);
2489         if (body->mbo_valid & OBD_MD_FLUID)
2490                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2491         if (body->mbo_valid & OBD_MD_FLGID)
2492                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2493         if (body->mbo_valid & OBD_MD_FLPROJID)
2494                 lli->lli_projid = body->mbo_projid;
2495         if (body->mbo_valid & OBD_MD_FLNLINK)
2496                 set_nlink(inode, body->mbo_nlink);
2497         if (body->mbo_valid & OBD_MD_FLRDEV)
2498                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2499
2500         if (body->mbo_valid & OBD_MD_FLID) {
2501                 /* FID shouldn't be changed! */
2502                 if (fid_is_sane(&lli->lli_fid)) {
2503                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2504                                  "Trying to change FID "DFID
2505                                  " to the "DFID", inode "DFID"(%p)\n",
2506                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2507                                  PFID(ll_inode2fid(inode)), inode);
2508                 } else {
2509                         lli->lli_fid = body->mbo_fid1;
2510                 }
2511         }
2512
2513         LASSERT(fid_seq(&lli->lli_fid) != 0);
2514
2515         lli->lli_attr_valid = body->mbo_valid;
2516         if (body->mbo_valid & OBD_MD_FLSIZE) {
2517                 i_size_write(inode, body->mbo_size);
2518
2519                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2520                        PFID(ll_inode2fid(inode)),
2521                        (unsigned long long)body->mbo_size);
2522
2523                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2524                         inode->i_blocks = body->mbo_blocks;
2525         } else {
2526                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2527                         lli->lli_lazysize = body->mbo_size;
2528                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2529                         lli->lli_lazyblocks = body->mbo_blocks;
2530         }
2531
2532         if (body->mbo_valid & OBD_MD_TSTATE) {
2533                 /* Set LLIF_FILE_RESTORING if restore ongoing and
2534                  * clear it when done, to ensure we start glimpsing the
2535                  * updated attrs again
2536                  */
2537                 if (body->mbo_t_state & MS_RESTORE)
2538                         set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2539                 else
2540                         clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2541         }
2542
2543         return 0;
2544 }
2545
2546 int ll_read_inode2(struct inode *inode, void *opaque)
2547 {
2548         struct lustre_md *md = opaque;
2549         struct ll_inode_info *lli = ll_i2info(inode);
2550         int     rc;
2551         ENTRY;
2552
2553         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2554                PFID(&lli->lli_fid), inode);
2555
2556         /* Core attributes from the MDS first.  This is a new inode, and
2557          * the VFS doesn't zero times in the core inode so we have to do
2558          * it ourselves.  They will be overwritten by either MDS or OST
2559          * attributes - we just need to make sure they aren't newer.
2560          */
2561         inode->i_mtime.tv_sec = 0;
2562         inode->i_atime.tv_sec = 0;
2563         inode->i_ctime.tv_sec = 0;
2564         inode->i_rdev = 0;
2565         rc = ll_update_inode(inode, md);
2566         if (rc != 0)
2567                 RETURN(rc);
2568
2569         /* OIDEBUG(inode); */
2570
2571 #ifdef HAVE_BACKING_DEV_INFO
2572         /* initializing backing dev info. */
2573         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2574 #endif
2575         if (S_ISREG(inode->i_mode)) {
2576                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2577                 inode->i_op = &ll_file_inode_operations;
2578                 inode->i_fop = sbi->ll_fop;
2579                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2580                 EXIT;
2581         } else if (S_ISDIR(inode->i_mode)) {
2582                 inode->i_op = &ll_dir_inode_operations;
2583                 inode->i_fop = &ll_dir_operations;
2584                 EXIT;
2585         } else if (S_ISLNK(inode->i_mode)) {
2586                 inode->i_op = &ll_fast_symlink_inode_operations;
2587                 EXIT;
2588         } else {
2589                 inode->i_op = &ll_special_inode_operations;
2590
2591                 init_special_inode(inode, inode->i_mode,
2592                                    inode->i_rdev);
2593
2594                 EXIT;
2595         }
2596
2597         return 0;
2598 }
2599
2600 void ll_delete_inode(struct inode *inode)
2601 {
2602         struct ll_inode_info *lli = ll_i2info(inode);
2603         struct address_space *mapping = &inode->i_data;
2604         unsigned long nrpages;
2605         unsigned long flags;
2606
2607         ENTRY;
2608
2609         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2610                 /* This is the last chance to write out dirty pages;
2611                  * otherwise we may lose data during umount.
2612                  *
2613                  * If i_nlink is 0 then just discard the data. This is safe
2614                  * because the local inode only gets i_nlink 0 from the server
2615                  * on the last unlink, so the file is not open somewhere else
2616                  */
2617                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2618                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2619         }
2620         truncate_inode_pages_final(mapping);
2621
2622         /* Workaround for LU-118: Note nrpages may not be totally updated when
2623          * truncate_inode_pages() returns, as there can be a page in the process
2624          * of deletion (inside __delete_from_page_cache()) in the specified
2625          * range. Thus mapping->nrpages can be non-zero when this function
2626          * returns even after truncation of the whole mapping.  Only do this if
2627          * nrpages isn't already zero.
2628          */
2629         nrpages = mapping->nrpages;
2630         if (nrpages) {
2631                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2632                 nrpages = mapping->nrpages;
2633                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2634         } /* Workaround end */
2635
2636         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2637                  "see https://jira.whamcloud.com/browse/LU-118\n",
2638                  ll_i2sbi(inode)->ll_fsname,
2639                  PFID(ll_inode2fid(inode)), inode, nrpages);
2640
2641         ll_clear_inode(inode);
2642         clear_inode(inode);
2643
2644         EXIT;
2645 }
2646
2647 int ll_iocontrol(struct inode *inode, struct file *file,
2648                  unsigned int cmd, unsigned long arg)
2649 {
2650         struct ll_sb_info *sbi = ll_i2sbi(inode);
2651         struct ptlrpc_request *req = NULL;
2652         int rc, flags = 0;
2653         ENTRY;
2654
2655         switch (cmd) {
2656         case FS_IOC_GETFLAGS: {
2657                 struct mdt_body *body;
2658                 struct md_op_data *op_data;
2659
2660                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2661                                              0, 0, LUSTRE_OPC_ANY,
2662                                              NULL);
2663                 if (IS_ERR(op_data))
2664                         RETURN(PTR_ERR(op_data));
2665
2666                 op_data->op_valid = OBD_MD_FLFLAGS;
2667                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2668                 ll_finish_md_op_data(op_data);
2669                 if (rc) {
2670                         CERROR("%s: failure inode "DFID": rc = %d\n",
2671                                sbi->ll_md_exp->exp_obd->obd_name,
2672                                PFID(ll_inode2fid(inode)), rc);
2673                         RETURN(-abs(rc));
2674                 }
2675
2676                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2677
2678                 flags = body->mbo_flags;
2679
2680                 ptlrpc_req_finished(req);
2681
2682                 RETURN(put_user(flags, (int __user *)arg));
2683         }
2684         case FS_IOC_SETFLAGS: {
2685                 struct iattr *attr;
2686                 struct md_op_data *op_data;
2687                 struct cl_object *obj;
2688                 struct fsxattr fa = { 0 };
2689
2690                 if (get_user(flags, (int __user *)arg))
2691                         RETURN(-EFAULT);
2692
2693                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2694                 if (flags & LUSTRE_PROJINHERIT_FL)
2695                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2696
2697                 rc = ll_ioctl_check_project(inode, &fa);
2698                 if (rc)
2699                         RETURN(rc);
2700
2701                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2702                                              LUSTRE_OPC_ANY, NULL);
2703                 if (IS_ERR(op_data))
2704                         RETURN(PTR_ERR(op_data));
2705
2706                 op_data->op_attr_flags = flags;
2707                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2708                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2709                 ll_finish_md_op_data(op_data);
2710                 ptlrpc_req_finished(req);
2711                 if (rc)
2712                         RETURN(rc);
2713
2714                 ll_update_inode_flags(inode, flags);
2715
2716                 obj = ll_i2info(inode)->lli_clob;
2717                 if (obj == NULL)
2718                         RETURN(0);
2719
2720                 OBD_ALLOC_PTR(attr);
2721                 if (attr == NULL)
2722                         RETURN(-ENOMEM);
2723
2724                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2725
2726                 OBD_FREE_PTR(attr);
2727                 RETURN(rc);
2728         }
2729         default:
2730                 RETURN(-ENOSYS);
2731         }
2732
2733         RETURN(0);
2734 }
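
/*
 * For illustration, a minimal userspace sketch of how the FS_IOC_GETFLAGS /
 * FS_IOC_SETFLAGS cases above are typically driven (roughly what lsattr and
 * chattr do).  FS_IOC_GETFLAGS, FS_IOC_SETFLAGS and FS_PROJINHERIT_FL come
 * from <linux/fs.h>; mapping FS_PROJINHERIT_FL onto the
 * LUSTRE_PROJINHERIT_FL handling above is an assumption of this sketch.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	static int set_projinherit(const char *path)
 *	{
 *		int flags = 0;
 *		int fd = open(path, O_RDONLY);
 *		int rc;
 *
 *		if (fd < 0)
 *			return -errno;
 *
 *		rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *		if (rc == 0) {
 *			flags |= FS_PROJINHERIT_FL;
 *			rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *		}
 *		rc = rc ? -errno : 0;
 *		close(fd);
 *		return rc;
 *	}
 */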
2735
2736 int ll_flush_ctx(struct inode *inode)
2737 {
2738         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2739
2740         CDEBUG(D_SEC, "flush context for user %d\n",
2741                from_kuid(&init_user_ns, current_uid()));
2742
2743         obd_set_info_async(NULL, sbi->ll_md_exp,
2744                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2745                            0, NULL, NULL);
2746         obd_set_info_async(NULL, sbi->ll_dt_exp,
2747                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2748                            0, NULL, NULL);
2749         return 0;
2750 }
2751
2752 /* umount -f client means force down, don't save state */
2753 void ll_umount_begin(struct super_block *sb)
2754 {
2755         struct ll_sb_info *sbi = ll_s2sbi(sb);
2756         struct obd_device *obd;
2757         struct obd_ioctl_data *ioc_data;
2758         int cnt;
2759         ENTRY;
2760
2761         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2762                sb->s_count, atomic_read(&sb->s_active));
2763
2764         obd = class_exp2obd(sbi->ll_md_exp);
2765         if (obd == NULL) {
2766                 CERROR("Invalid MDC connection handle %#llx\n",
2767                        sbi->ll_md_exp->exp_handle.h_cookie);
2768                 EXIT;
2769                 return;
2770         }
2771         obd->obd_force = 1;
2772
2773         obd = class_exp2obd(sbi->ll_dt_exp);
2774         if (obd == NULL) {
2775                 CERROR("Invalid LOV connection handle %#llx\n",
2776                        sbi->ll_dt_exp->exp_handle.h_cookie);
2777                 EXIT;
2778                 return;
2779         }
2780         obd->obd_force = 1;
2781
2782         OBD_ALLOC_PTR(ioc_data);
2783         if (ioc_data) {
2784                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2785                               sizeof *ioc_data, ioc_data, NULL);
2786
2787                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2788                               sizeof *ioc_data, ioc_data, NULL);
2789
2790                 OBD_FREE_PTR(ioc_data);
2791         }
2792
2793         /* Really, we'd like to wait until there are no requests outstanding,
2794          * and then continue.  For now, we just periodically check whether the
2795          * VFS has dropped its mount references and hope to finish within 10s.
2796          */
2797         cnt = 10;
2798         while (cnt > 0 &&
2799                !may_umount(sbi->ll_mnt.mnt)) {
2800                 ssleep(1);
2801                 cnt -= 1;
2802         }
2803
2804         EXIT;
2805 }
2806
2807 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2808 {
2809         struct ll_sb_info *sbi = ll_s2sbi(sb);
2810         char *profilenm = get_profile_name(sb);
2811         int err;
2812         __u32 read_only;
2813
2814         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2815                 read_only = *flags & MS_RDONLY;
2816                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2817                                          sizeof(KEY_READ_ONLY),
2818                                          KEY_READ_ONLY, sizeof(read_only),
2819                                          &read_only, NULL);
2820                 if (err) {
2821                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2822                                       profilenm, read_only ?
2823                                       "read-only" : "read-write", err);
2824                         return err;
2825                 }
2826
2827                 if (read_only)
2828                         sb->s_flags |= SB_RDONLY;
2829                 else
2830                         sb->s_flags &= ~SB_RDONLY;
2831
2832                 if (sbi->ll_flags & LL_SBI_VERBOSE)
2833                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2834                                       read_only ?  "read-only" : "read-write");
2835         }
2836         return 0;
2837 }
2838
2839 /**
2840  * Clean up the open handle that is cached on the MDT side.
2841  *
2842  * On open, the client-side open handling thread may hit an error after
2843  * the MDT has already granted the open. In that case the client should
2844  * send a close RPC to the MDT as cleanup; otherwise the open handle is
2845  * leaked on the MDT until the client unmounts or is evicted.
2846  *
2847  * Furthermore, if someone unlinks the file, the open handle holds a
2848  * reference on that file/object and will block subsequent threads that
2849  * want to locate the object via its FID.
2850  *
2851  * \param[in] sb        super block for this file-system
2852  * \param[in] open_req  pointer to the original open request
2853  */
2854 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2855 {
2856         struct mdt_body                 *body;
2857         struct md_op_data               *op_data;
2858         struct ptlrpc_request           *close_req = NULL;
2859         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2860         ENTRY;
2861
2862         body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
2863         OBD_ALLOC_PTR(op_data);
2864         if (op_data == NULL) {
2865                 CWARN("%s: cannot allocate op_data to release open handle for "
2866                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2867
2868                 RETURN_EXIT;
2869         }
2870
2871         op_data->op_fid1 = body->mbo_fid1;
2872         op_data->op_open_handle = body->mbo_open_handle;
2873         op_data->op_mod_time = ktime_get_real_seconds();
2874         md_close(exp, op_data, NULL, &close_req);
2875         ptlrpc_req_finished(close_req);
2876         ll_finish_md_op_data(op_data);
2877
2878         EXIT;
2879 }
2880
2881 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2882                   struct super_block *sb, struct lookup_intent *it)
2883 {
2884         struct ll_sb_info *sbi = NULL;
2885         struct lustre_md md = { NULL };
2886         bool default_lmv_deleted = false;
2887         int rc;
2888
2889         ENTRY;
2890
2891         LASSERT(*inode || sb);
2892         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2893         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2894                               sbi->ll_md_exp, &md);
2895         if (rc != 0)
2896                 GOTO(out, rc);
2897
2898         /*
2899          * Clear default_lmv only if the intent_getattr reply doesn't contain
2900          * it, but it needs to be done after iget. Check this early because
2901          * ll_update_lsm_md() may change md.
2902          */
2903         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2904             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2905                 default_lmv_deleted = true;
2906
2907         if (*inode) {
2908                 rc = ll_update_inode(*inode, &md);
2909                 if (rc != 0)
2910                         GOTO(out, rc);
2911         } else {
2912                 LASSERT(sb != NULL);
2913
2914                 /*
2915                  * At this point the server returns the same fid that the client
2916                  * generated for the create, so using ->fid1 is okay here.
2917                  */
2918                 if (!fid_is_sane(&md.body->mbo_fid1)) {
2919                         CERROR("%s: Fid is insane "DFID"\n",
2920                                 sbi->ll_fsname,
2921                                 PFID(&md.body->mbo_fid1));
2922                         GOTO(out, rc = -EINVAL);
2923                 }
2924
2925                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2926                                              sbi->ll_flags & LL_SBI_32BIT_API),
2927                                  &md);
2928                 if (IS_ERR(*inode)) {
2929                         lmd_clear_acl(&md);
2930                         rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
2931                         *inode = NULL;
2932                         CERROR("new_inode -fatal: rc %d\n", rc);
2933                         GOTO(out, rc);
2934                 }
2935         }
2936
2937         /* Handle a piggybacked layout lock.
2938          * A layout lock can be piggybacked on getattr and open requests.
2939          * The lsm can be applied to the inode only if it comes with a layout
2940          * lock, otherwise the correct layout may be overwritten, for example:
2941          * 1. proc1: the MDT returns an lsm but does not grant the layout lock
2942          * 2. the layout is changed by another client
2943          * 3. proc2: refreshes the layout and is granted the layout lock
2944          * 4. proc1: applies the now-stale layout */
2945         if (it != NULL && it->it_lock_mode != 0) {
2946                 struct lustre_handle lockh;
2947                 struct ldlm_lock *lock;
2948
2949                 lockh.cookie = it->it_lock_handle;
2950                 lock = ldlm_handle2lock(&lockh);
2951                 LASSERT(lock != NULL);
2952                 if (ldlm_has_layout(lock)) {
2953                         struct cl_object_conf conf;
2954
2955                         memset(&conf, 0, sizeof(conf));
2956                         conf.coc_opc = OBJECT_CONF_SET;
2957                         conf.coc_inode = *inode;
2958                         conf.coc_lock = lock;
2959                         conf.u.coc_layout = md.layout;
2960                         (void)ll_layout_conf(*inode, &conf);
2961                 }
2962                 LDLM_LOCK_PUT(lock);
2963         }
2964
2965         if (default_lmv_deleted)
2966                 ll_update_default_lsm_md(*inode, &md);
2967
2968         /* we may want to apply some policy for foreign file/dir */
2969         if (ll_sbi_has_foreign_symlink(sbi)) {
2970                 rc = ll_manage_foreign(*inode, &md);
2971                 if (rc < 0)
2972                         GOTO(out, rc);
2973         }
2974
2975         GOTO(out, rc = 0);
2976
2977 out:
2978         /* cleanup will be done if necessary */
2979         md_free_lustre_md(sbi->ll_md_exp, &md);
2980
2981         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
2982                 ll_intent_drop_lock(it);
2983                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req);
2984         }
2985
2986         return rc;
2987 }
2988
2989 int ll_obd_statfs(struct inode *inode, void __user *arg)
2990 {
2991         struct ll_sb_info *sbi = NULL;
2992         struct obd_export *exp;
2993         struct obd_ioctl_data *data = NULL;
2994         __u32 type;
2995         int len = 0, rc;
2996
2997         if (inode)
2998                 sbi = ll_i2sbi(inode);
2999         if (!sbi)
3000                 GOTO(out_statfs, rc = -EINVAL);
3001
3002         rc = obd_ioctl_getdata(&data, &len, arg);
3003         if (rc)
3004                 GOTO(out_statfs, rc);
3005
3006         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3007             !data->ioc_pbuf1 || !data->ioc_pbuf2)
3008                 GOTO(out_statfs, rc = -EINVAL);
3009
3010         if (data->ioc_inllen1 != sizeof(__u32) ||
3011             data->ioc_inllen2 != sizeof(__u32) ||
3012             data->ioc_plen1 != sizeof(struct obd_statfs) ||
3013             data->ioc_plen2 != sizeof(struct obd_uuid))
3014                 GOTO(out_statfs, rc = -EINVAL);
3015
3016         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3017         if (type & LL_STATFS_LMV)
3018                 exp = sbi->ll_md_exp;
3019         else if (type & LL_STATFS_LOV)
3020                 exp = sbi->ll_dt_exp;
3021         else
3022                 GOTO(out_statfs, rc = -ENODEV);
3023
3024         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3025         if (rc)
3026                 GOTO(out_statfs, rc);
3027 out_statfs:
3028         OBD_FREE_LARGE(data, len);
3029         return rc;
3030 }
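
/*
 * From userspace this statfs ioctl is normally reached through liblustreapi
 * rather than packed by hand: the buffers checked above carry a __u32 type
 * (LL_STATFS_LMV or LL_STATFS_LOV), a __u32 that is presumably the target
 * index, and receive a struct obd_statfs plus the target's struct obd_uuid.
 * A minimal sketch, assuming the llapi_obd_statfs() helper with this
 * signature:
 *
 *	#include <stdio.h>
 *	#include <lustre/lustreapi.h>
 *
 *	struct obd_statfs stat_buf = { 0 };
 *	struct obd_uuid uuid_buf = { { 0 } };
 *	int rc;
 *
 *	rc = llapi_obd_statfs("/mnt/lustre", LL_STATFS_LOV, 0,
 *			      &stat_buf, &uuid_buf);
 *	if (rc == 0)
 *		printf("%s: %llu of %llu blocks free\n", uuid_buf.uuid,
 *		       stat_buf.os_bfree, stat_buf.os_blocks);
 */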
3031
3032 /*
3033  * This is normally called in ll_finish_md_op_data(), but sometimes it needs
3034  * to be called early to avoid a deadlock.
3035  */
3036 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3037 {
3038         if (op_data->op_mea2_sem) {
3039                 up_read_non_owner(op_data->op_mea2_sem);
3040                 op_data->op_mea2_sem = NULL;
3041         }
3042
3043         if (op_data->op_mea1_sem) {
3044                 up_read_non_owner(op_data->op_mea1_sem);
3045                 op_data->op_mea1_sem = NULL;
3046         }
3047 }
3048
3049 /* This function prepares the md_op_data hint for passing down to the MD stack. */
3050 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3051                                       struct inode *i1, struct inode *i2,
3052                                       const char *name, size_t namelen,
3053                                       __u32 mode, enum md_op_code opc,
3054                                       void *data)
3055 {
3056         LASSERT(i1 != NULL);
3057
3058         if (name == NULL) {
3059                 /* Do not reuse namelen for something else. */
3060                 if (namelen != 0)
3061                         return ERR_PTR(-EINVAL);
3062         } else {
3063                 if (namelen > ll_i2sbi(i1)->ll_namelen)
3064                         return ERR_PTR(-ENAMETOOLONG);
3065
3066                 /* "/" is not a valid name, but it is allowed here */
3067                 if (!lu_name_is_valid_2(name, namelen) &&
3068                     strncmp("/", name, namelen) != 0)
3069                         return ERR_PTR(-EINVAL);
3070         }
3071
3072         if (op_data == NULL)
3073                 OBD_ALLOC_PTR(op_data);
3074
3075         if (op_data == NULL)
3076                 return ERR_PTR(-ENOMEM);
3077
3078         ll_i2gids(op_data->op_suppgids, i1, i2);
3079         op_data->op_fid1 = *ll_inode2fid(i1);
3080         op_data->op_code = opc;
3081
3082         if (S_ISDIR(i1->i_mode)) {
3083                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3084                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3085                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3086                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3087         }
3088
3089         if (i2) {
3090                 op_data->op_fid2 = *ll_inode2fid(i2);
3091                 if (S_ISDIR(i2->i_mode)) {
3092                         if (i2 != i1) {
3093                                 /* i2 is typically a child of i1, and MUST be
3094                                  * further from the root to avoid deadlocks.
3095                                  */
3096                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3097                                 op_data->op_mea2_sem =
3098                                                 &ll_i2info(i2)->lli_lsm_sem;
3099                         }
3100                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3101                 }
3102         } else {
3103                 fid_zero(&op_data->op_fid2);
3104         }
3105
3106         if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
3107                 op_data->op_cli_flags |= CLI_HASH64;
3108
3109         if (ll_need_32bit_api(ll_i2sbi(i1)))
3110                 op_data->op_cli_flags |= CLI_API32;
3111
3112         op_data->op_name = name;
3113         op_data->op_namelen = namelen;
3114         op_data->op_mode = mode;
3115         op_data->op_mod_time = ktime_get_real_seconds();
3116         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3117         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3118         op_data->op_cap = cfs_curproc_cap_pack();
3119         op_data->op_mds = 0;
3120         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3121              filename_is_volatile(name, namelen, &op_data->op_mds)) {
3122                 op_data->op_bias |= MDS_CREATE_VOLATILE;
3123         }
3124         op_data->op_data = data;
3125
3126         return op_data;
3127 }
3128
3129 void ll_finish_md_op_data(struct md_op_data *op_data)
3130 {
3131         ll_unlock_md_op_lsm(op_data);
3132         security_release_secctx(op_data->op_file_secctx,
3133                                 op_data->op_file_secctx_size);
3134         llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3135         OBD_FREE_PTR(op_data);
3136 }
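
/*
 * Typical caller pattern for the two helpers above (compare the
 * FS_IOC_GETFLAGS case in ll_iocontrol() earlier in this file): prepare the
 * op_data, issue the MD request, then release it with ll_finish_md_op_data(),
 * which also drops the lli_lsm_sem references taken in ll_prep_md_op_data().
 *
 *	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		RETURN(PTR_ERR(op_data));
 *
 *	op_data->op_valid = OBD_MD_FLFLAGS;
 *	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
 *	ll_finish_md_op_data(op_data);
 */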
3137
3138 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3139 {
3140         struct ll_sb_info *sbi;
3141
3142         LASSERT(seq && dentry);
3143         sbi = ll_s2sbi(dentry->d_sb);
3144
3145         if (sbi->ll_flags & LL_SBI_NOLCK)
3146                 seq_puts(seq, ",nolock");
3147
3148         /* "flock" has been the default since 2.13, but it wasn't for many
3149          * years, so it is still useful to print it to show it is enabled.
3150          * Also print "noflock" so it is clear when flock is disabled.
3151          */
3152         if (sbi->ll_flags & LL_SBI_FLOCK)
3153                 seq_puts(seq, ",flock");
3154         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
3155                 seq_puts(seq, ",localflock");
3156         else
3157                 seq_puts(seq, ",noflock");
3158
3159         if (sbi->ll_flags & LL_SBI_USER_XATTR)
3160                 seq_puts(seq, ",user_xattr");
3161
3162         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
3163                 seq_puts(seq, ",lazystatfs");
3164
3165         if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
3166                 seq_puts(seq, ",user_fid2path");
3167
3168         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
3169                 seq_puts(seq, ",always_ping");
3170
3171         if (ll_sbi_has_test_dummy_encryption(sbi))
3172                 seq_puts(seq, ",test_dummy_encryption");
3173
3174         if (ll_sbi_has_encrypt(sbi))
3175                 seq_puts(seq, ",encrypt");
3176         else
3177                 seq_puts(seq, ",noencrypt");
3178
3179         if (sbi->ll_flags & LL_SBI_FOREIGN_SYMLINK) {
3180                 seq_puts(seq, ",foreign_symlink=");
3181                 seq_puts(seq, sbi->ll_foreign_symlink_prefix);
3182         }
3183
3184         RETURN(0);
3185 }
3186
3187 /**
3188  * Get the obd name for the given cmd and copy it out to user space.
3189  */
3190 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3191 {
3192         struct ll_sb_info *sbi = ll_i2sbi(inode);
3193         struct obd_device *obd;
3194         ENTRY;
3195
3196         if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
3197                 obd = class_exp2obd(sbi->ll_dt_exp);
3198         else if (cmd == OBD_IOC_GETMDNAME)
3199                 obd = class_exp2obd(sbi->ll_md_exp);
3200         else
3201                 RETURN(-EINVAL);
3202
3203         if (!obd)
3204                 RETURN(-ENOENT);
3205
3206         if (copy_to_user((void __user *)arg, obd->obd_name,
3207                          strlen(obd->obd_name) + 1))
3208                 RETURN(-EFAULT);
3209
3210         RETURN(0);
3211 }
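
/*
 * A userspace sketch for the handler above.  OBD_IOC_GETDTNAME and
 * OBD_IOC_GETMDNAME are the commands it accepts; the MAX_OBD_NAME buffer
 * size is an assumption, since the kernel side copies strlen(obd_name) + 1
 * bytes without checking the user buffer length.
 *
 *	char name[MAX_OBD_NAME] = "";
 *
 *	if (ioctl(fd, OBD_IOC_GETDTNAME, name) == 0)
 *		printf("data target: %s\n", name);
 *	if (ioctl(fd, OBD_IOC_GETMDNAME, name) == 0)
 *		printf("metadata target: %s\n", name);
 */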
3212
3213 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3214 {
3215         char *path = NULL;
3216
3217         struct path p;
3218
3219         p.dentry = dentry;
3220         p.mnt = current->fs->root.mnt;
3221         path_get(&p);
3222         path = d_path(&p, buf, bufsize);
3223         path_put(&p);
3224         return path;
3225 }
3226
3227 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3228 {
3229         char *buf, *path = NULL;
3230         struct dentry *dentry = NULL;
3231         struct inode *inode = page->mapping->host;
3232
3233         /* This can be called inside a spinlock, so use GFP_ATOMIC. */
3234         buf = (char *)__get_free_page(GFP_ATOMIC);
3235         if (buf != NULL) {
3236                 dentry = d_find_alias(page->mapping->host);
3237                 if (dentry != NULL)
3238                         path = ll_d_path(dentry, buf, PAGE_SIZE);
3239         }
3240
3241         /* The below message is checked in recovery-small.sh test_24b */
3242         CDEBUG(D_WARNING,
3243                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3244                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3245                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3246                PFID(ll_inode2fid(inode)),
3247                (path && !IS_ERR(path)) ? path : "", ioret);
3248
3249         if (dentry != NULL)
3250                 dput(dentry);
3251
3252         if (buf != NULL)
3253                 free_page((unsigned long)buf);
3254 }
3255
3256 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3257                         struct lov_user_md **kbuf)
3258 {
3259         struct lov_user_md      lum;
3260         ssize_t                 lum_size;
3261         ENTRY;
3262
3263         if (copy_from_user(&lum, md, sizeof(lum)))
3264                 RETURN(-EFAULT);
3265
3266         lum_size = ll_lov_user_md_size(&lum);
3267         if (lum_size < 0)
3268                 RETURN(lum_size);
3269
3270         OBD_ALLOC_LARGE(*kbuf, lum_size);
3271         if (*kbuf == NULL)
3272                 RETURN(-ENOMEM);
3273
3274         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3275                 OBD_FREE_LARGE(*kbuf, lum_size);
3276                 RETURN(-EFAULT);
3277         }
3278
3279         RETURN(lum_size);
3280 }
3281
3282 /*
3283  * Compute the llite root squash state after a change of the root squash
3284  * configuration setting or the addition/removal of an LNet NID.
3285  */
3286 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3287 {
3288         struct root_squash_info *squash = &sbi->ll_squash;
3289         int i;
3290         bool matched;
3291         struct lnet_process_id id;
3292
3293         /* Update norootsquash flag */
3294         spin_lock(&squash->rsi_lock);
3295         if (list_empty(&squash->rsi_nosquash_nids))
3296                 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3297         else {
3298                 /* Do not apply root squash as soon as one of our NIDs is
3299                  * in the nosquash_nids list */
3300                 matched = false;
3301                 i = 0;
3302                 while (LNetGetId(i++, &id) != -ENOENT) {
3303                         if (id.nid == LNET_NID_LO_0)
3304                                 continue;
3305                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3306                                 matched = true;
3307                                 break;
3308                         }
3309                 }
3310                 if (matched)
3311                         sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
3312                 else
3313                         sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3314         }
3315         spin_unlock(&squash->rsi_lock);
3316 }
3317
3318 /**
3319  * Parse linkea content to extract information about a given hardlink
3320  *
3321  * \param[in]   ldata      - Initialized linkea data
3322  * \param[in]   linkno     - Link identifier
3323  * \param[out]  parent_fid - The entry's parent FID
3324  * \param[out]  ln         - Entry name destination buffer
3325  *
3326  * \retval 0 on success
3327  * \retval Appropriate negative error code on failure
3328  */
3329 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3330                             struct lu_fid *parent_fid, struct lu_name *ln)
3331 {
3332         unsigned int    idx;
3333         int             rc;
3334         ENTRY;
3335
3336         rc = linkea_init_with_rec(ldata);
3337         if (rc < 0)
3338                 RETURN(rc);
3339
3340         if (linkno >= ldata->ld_leh->leh_reccount)
3341                 /* beyond last link */
3342                 RETURN(-ENODATA);
3343
3344         linkea_first_entry(ldata);
3345         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3346                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3347                                     parent_fid);
3348                 if (idx == linkno)
3349                         break;
3350
3351                 linkea_next_entry(ldata);
3352         }
3353
3354         if (idx < linkno)
3355                 RETURN(-ENODATA);
3356
3357         RETURN(0);
3358 }
3359
3360 /**
3361  * Get parent FID and name of an identified link. Operation is performed for
3362  * a given link number, letting the caller iterate over linkno to list one or
3363  * all links of an entry.
3364  *
3365  * \param[in]     file - File descriptor against which to perform the operation
3366  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3367  *                       on and the available size. It is eventually filled with
3368  *                       the requested information or left untouched on error
3369  *
3370  * \retval - 0 on success
3371  * \retval - Appropriate negative error code on failure
3372  */
3373 int ll_getparent(struct file *file, struct getparent __user *arg)
3374 {
3375         struct inode            *inode = file_inode(file);
3376         struct linkea_data      *ldata;
3377         struct lu_buf            buf = LU_BUF_NULL;
3378         struct lu_name           ln;
3379         struct lu_fid            parent_fid;
3380         __u32                    linkno;
3381         __u32                    name_size;
3382         int                      rc;
3383
3384         ENTRY;
3385
3386         if (!capable(CAP_DAC_READ_SEARCH) &&
3387             !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
3388                 RETURN(-EPERM);
3389
3390         if (get_user(name_size, &arg->gp_name_size))
3391                 RETURN(-EFAULT);
3392
3393         if (get_user(linkno, &arg->gp_linkno))
3394                 RETURN(-EFAULT);
3395
3396         if (name_size > PATH_MAX)
3397                 RETURN(-EINVAL);
3398
3399         OBD_ALLOC(ldata, sizeof(*ldata));
3400         if (ldata == NULL)
3401                 RETURN(-ENOMEM);
3402
3403         rc = linkea_data_new(ldata, &buf);
3404         if (rc < 0)
3405                 GOTO(ldata_free, rc);
3406
3407         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3408                            buf.lb_len, OBD_MD_FLXATTR);
3409         if (rc < 0)
3410                 GOTO(lb_free, rc);
3411
3412         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3413         if (rc < 0)
3414                 GOTO(lb_free, rc);
3415
3416         if (ln.ln_namelen >= name_size)
3417                 GOTO(lb_free, rc = -EOVERFLOW);
3418
3419         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3420                 GOTO(lb_free, rc = -EFAULT);
3421
3422         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3423                 GOTO(lb_free, rc = -EFAULT);
3424
3425         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3426                 GOTO(lb_free, rc = -EFAULT);
3427
3428 lb_free:
3429         lu_buf_free(&buf);
3430 ldata_free:
3431         OBD_FREE(ldata, sizeof(*ldata));
3432
3433         RETURN(rc);
3434 }
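
/*
 * A userspace sketch for ll_getparent().  The caller allocates
 * struct getparent plus room for the returned name, fills in gp_linkno and
 * gp_name_size, and issues the ioctl; LL_IOC_GETPARENT is assumed to be the
 * command routed to this handler (it is not defined in this file).
 *
 *	size_t name_size = NAME_MAX + 1;
 *	struct getparent *gp;
 *	int rc;
 *
 *	gp = calloc(1, sizeof(*gp) + name_size);
 *	if (gp == NULL)
 *		return -ENOMEM;
 *
 *	// gp_linkno selects which hard link to resolve; 0 is the first
 *	gp->gp_linkno = 0;
 *	gp->gp_name_size = name_size;
 *	rc = ioctl(fd, LL_IOC_GETPARENT, gp);
 *	if (rc == 0)
 *		printf("parent "DFID", name '%s'\n",
 *		       PFID(&gp->gp_fid), gp->gp_name);
 *	free(gp);
 */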