1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/llite/llite_lib.c
32  *
33  * Lustre Light Super operations
34  */
35
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include <linux/cpu.h>
39 #include <linux/module.h>
40 #include <linux/random.h>
41 #include <linux/statfs.h>
42 #include <linux/time.h>
43 #include <linux/types.h>
44 #include <libcfs/linux/linux-uuid.h>
45 #include <linux/version.h>
46 #include <linux/mm.h>
47 #include <linux/user_namespace.h>
48 #include <linux/delay.h>
49 #include <linux/uidgid.h>
50 #include <linux/fs_struct.h>
51
52 #ifndef HAVE_CPUS_READ_LOCK
53 #include <libcfs/linux/linux-cpu.h>
54 #endif
55 #include <uapi/linux/lustre/lustre_ioctl.h>
56 #ifdef HAVE_UAPI_LINUX_MOUNT_H
57 #include <uapi/linux/mount.h>
58 #endif
59
60 #include <lustre_ha.h>
61 #include <lustre_dlm.h>
62 #include <lprocfs_status.h>
63 #include <lustre_disk.h>
64 #include <uapi/linux/lustre/lustre_param.h>
65 #include <lustre_log.h>
66 #include <cl_object.h>
67 #include <obd_cksum.h>
68 #include "llite_internal.h"
69
70 struct kmem_cache *ll_file_data_slab;
71
72 #ifndef log2
73 #define log2(n) ffz(~(n))
74 #endif
75
76 /**
77  * If only one core is visible to Lustre, async readahead is disabled
78  * (half of one core rounds down to zero).  To avoid massive
79  * oversubscription, we use half of the active cores as the default
80  * maximum number of async readahead requests.
81  */
82 static inline unsigned int ll_get_ra_async_max_active(void)
83 {
84         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
85 }
86
87 static struct ll_sb_info *ll_init_sbi(void)
88 {
89         struct ll_sb_info *sbi = NULL;
90         unsigned long pages;
91         unsigned long lru_page_max;
92         struct sysinfo si;
93         int rc;
94         int i;
95
96         ENTRY;
97
98         OBD_ALLOC_PTR(sbi);
99         if (sbi == NULL)
100                 RETURN(ERR_PTR(-ENOMEM));
101
102         rc = pcc_super_init(&sbi->ll_pcc_super);
103         if (rc < 0)
104                 GOTO(out_sbi, rc);
105
106         spin_lock_init(&sbi->ll_lock);
107         mutex_init(&sbi->ll_lco.lco_lock);
108         spin_lock_init(&sbi->ll_pp_extent_lock);
109         spin_lock_init(&sbi->ll_process_lock);
110         sbi->ll_rw_stats_on = 0;
111         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
112
113         si_meminfo(&si);
114         pages = si.totalram - si.totalhigh;
115         lru_page_max = pages / 2;
116
117         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
118         sbi->ll_ra_info.ll_readahead_wq =
119                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
120                                        0, CFS_CPT_ANY,
121                                        sbi->ll_ra_info.ra_async_max_active);
122         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
123                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
124
125         /* initialize ll_cache data */
126         sbi->ll_cache = cl_cache_init(lru_page_max);
127         if (sbi->ll_cache == NULL)
128                 GOTO(out_destroy_ra, rc = -ENOMEM);
129
130         /* initialize foreign symlink prefix path */
131         OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
132         if (sbi->ll_foreign_symlink_prefix == NULL)
133                 GOTO(out_destroy_ra, rc = -ENOMEM);
134         memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
135         sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
136
137         /* initialize foreign symlink upcall path, none by default */
138         OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
139         if (sbi->ll_foreign_symlink_upcall == NULL)
140                 GOTO(out_destroy_ra, rc = -ENOMEM);
141         memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
142         sbi->ll_foreign_symlink_upcall_items = NULL;
143         sbi->ll_foreign_symlink_upcall_nb_items = 0;
144         init_rwsem(&sbi->ll_foreign_symlink_sem);
145         /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
146          * not enabled by default
147          */
148
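        /*
         * Default readahead tuning: cap total readahead at 1/32 of low memory
         * (bounded by SBI_DEFAULT_READ_AHEAD_MAX) and per-file readahead at
         * 1/4 of that (bounded by SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX); the
         * async readahead threshold starts equal to the per-file maximum.
         */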
149         sbi->ll_ra_info.ra_max_pages =
150                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
151         sbi->ll_ra_info.ra_max_pages_per_file =
152                 min(sbi->ll_ra_info.ra_max_pages / 4,
153                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
154         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
155                                 sbi->ll_ra_info.ra_max_pages_per_file;
156         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
157         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
158         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
159
160         sbi->ll_flags |= LL_SBI_VERBOSE;
161 #ifdef ENABLE_CHECKSUM
162         sbi->ll_flags |= LL_SBI_CHECKSUM;
163 #endif
164 #ifdef ENABLE_FLOCK
165         sbi->ll_flags |= LL_SBI_FLOCK;
166 #endif
167
168 #ifdef HAVE_LRU_RESIZE_SUPPORT
169         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
170 #endif
171         sbi->ll_flags |= LL_SBI_LAZYSTATFS;
172
173         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
174                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
175                                pp_r_hist.oh_lock);
176                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
177                                pp_w_hist.oh_lock);
178         }
179
180         /* metadata statahead is enabled by default */
181         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
182         sbi->ll_sa_max = LL_SA_RPC_DEF;
183         atomic_set(&sbi->ll_sa_total, 0);
184         atomic_set(&sbi->ll_sa_wrong, 0);
185         atomic_set(&sbi->ll_sa_running, 0);
186         atomic_set(&sbi->ll_agl_total, 0);
187         sbi->ll_flags |= LL_SBI_AGL_ENABLED;
188         sbi->ll_flags |= LL_SBI_FAST_READ;
189         sbi->ll_flags |= LL_SBI_TINY_WRITE;
190         sbi->ll_flags |= LL_SBI_PARALLEL_DIO;
191         ll_sbi_set_encrypt(sbi, true);
192
193         /* root squash */
194         sbi->ll_squash.rsi_uid = 0;
195         sbi->ll_squash.rsi_gid = 0;
196         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
197         spin_lock_init(&sbi->ll_squash.rsi_lock);
198
199         /* Per-filesystem file heat */
200         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
201         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
202
203         /* Per-fs open heat level before requesting open lock */
204         sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT;
205         sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS;
206         sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS;
207         RETURN(sbi);
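        /*
         * Error unwinding: release whatever was set up above in reverse
         * order of allocation, then free the sbi itself.
         */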
208 out_destroy_ra:
209         if (sbi->ll_foreign_symlink_prefix)
210                 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
211         if (sbi->ll_cache) {
212                 cl_cache_decref(sbi->ll_cache);
213                 sbi->ll_cache = NULL;
214         }
215         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
216 out_pcc:
217         pcc_super_fini(&sbi->ll_pcc_super);
218 out_sbi:
219         OBD_FREE_PTR(sbi);
220         RETURN(ERR_PTR(rc));
221 }
222
223 static void ll_free_sbi(struct super_block *sb)
224 {
225         struct ll_sb_info *sbi = ll_s2sbi(sb);
226         ENTRY;
227
228         if (sbi != NULL) {
229                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
230                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
231                 if (sbi->ll_ra_info.ll_readahead_wq)
232                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
233                 if (sbi->ll_cache != NULL) {
234                         cl_cache_decref(sbi->ll_cache);
235                         sbi->ll_cache = NULL;
236                 }
237                 if (sbi->ll_foreign_symlink_prefix) {
238                         OBD_FREE(sbi->ll_foreign_symlink_prefix,
239                                  sbi->ll_foreign_symlink_prefix_size);
240                         sbi->ll_foreign_symlink_prefix = NULL;
241                 }
242                 if (sbi->ll_foreign_symlink_upcall) {
243                         OBD_FREE(sbi->ll_foreign_symlink_upcall,
244                                  strlen(sbi->ll_foreign_symlink_upcall) +
245                                        1);
246                         sbi->ll_foreign_symlink_upcall = NULL;
247                 }
248                 if (sbi->ll_foreign_symlink_upcall_items) {
249                         int i;
250                         int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
251                         struct ll_foreign_symlink_upcall_item *items =
252                                 sbi->ll_foreign_symlink_upcall_items;
253
254                         for (i = 0 ; i < nb_items; i++)
255                                 if (items[i].type == STRING_TYPE)
256                                         OBD_FREE(items[i].string,
257                                                        items[i].size);
258
259                         OBD_FREE_LARGE(items, nb_items *
260                                 sizeof(struct ll_foreign_symlink_upcall_item));
261                         sbi->ll_foreign_symlink_upcall_items = NULL;
262                 }
263                 pcc_super_fini(&sbi->ll_pcc_super);
264                 OBD_FREE(sbi, sizeof(*sbi));
265         }
266         EXIT;
267 }
268
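/*
 * Connect to the metadata (md) and data (dt) obds named in the client
 * profile, negotiate connect flags with both, fetch the root FID and build
 * the root inode and dentry.  Called from ll_fill_super() once the client
 * configuration llog has been processed.
 */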
269 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
270 {
271         struct inode *root = NULL;
272         struct ll_sb_info *sbi = ll_s2sbi(sb);
273         struct obd_statfs *osfs = NULL;
274         struct ptlrpc_request *request = NULL;
275         struct obd_connect_data *data = NULL;
276         struct obd_uuid *uuid;
277         struct md_op_data *op_data;
278         struct lustre_md lmd;
279         u64 valid;
280         int size, err, checksum;
281
282         ENTRY;
283         sbi->ll_md_obd = class_name2obd(md);
284         if (!sbi->ll_md_obd) {
285                 CERROR("MD %s: not setup or attached\n", md);
286                 RETURN(-EINVAL);
287         }
288
289         OBD_ALLOC_PTR(data);
290         if (data == NULL)
291                 RETURN(-ENOMEM);
292
293         OBD_ALLOC_PTR(osfs);
294         if (osfs == NULL) {
295                 OBD_FREE_PTR(data);
296                 RETURN(-ENOMEM);
297         }
298
299         /* pass the client page size via ocd_grant_blkbits; the server should
300          * report back its backend blocksize for grant calculation purposes */
301         data->ocd_grant_blkbits = PAGE_SHIFT;
302
303         /* indicate MDT features supported by this client */
304         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
305                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
306                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
307                                   OBD_CONNECT_SRVLOCK  |
308                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
309                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
310                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
311                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
312                                   OBD_CONNECT_64BITHASH |
313                                   OBD_CONNECT_EINPROGRESS |
314                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
315                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
316                                   OBD_CONNECT_MAX_EASIZE |
317                                   OBD_CONNECT_FLOCK_DEAD |
318                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
319                                   OBD_CONNECT_OPEN_BY_FID |
320                                   OBD_CONNECT_DIR_STRIPE |
321                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
322                                   OBD_CONNECT_SUBTREE |
323                                   OBD_CONNECT_MULTIMODRPCS |
324                                   OBD_CONNECT_GRANT_PARAM |
325                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
326
327         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
328                                    OBD_CONNECT2_SUM_STATFS |
329                                    OBD_CONNECT2_OVERSTRIPING |
330                                    OBD_CONNECT2_FLR |
331                                    OBD_CONNECT2_LOCK_CONVERT |
332                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
333                                    OBD_CONNECT2_INC_XID |
334                                    OBD_CONNECT2_LSOM |
335                                    OBD_CONNECT2_ASYNC_DISCARD |
336                                    OBD_CONNECT2_PCC |
337                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
338                                    OBD_CONNECT2_GETATTR_PFID |
339                                    OBD_CONNECT2_DOM_LVB |
340                                    OBD_CONNECT2_REP_MBITS |
341                                    OBD_CONNECT2_ATOMIC_OPEN_LOCK;
342
343 #ifdef HAVE_LRU_RESIZE_SUPPORT
344         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
345                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
346 #endif
347         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
348
349         data->ocd_cksum_types = obd_cksum_types_supported_client();
350
351         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
352                 /* flag mdc connection as lightweight, only used for test
353                  * purposes, use with care */
354                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
355
356         data->ocd_ibits_known = MDS_INODELOCK_FULL;
357         data->ocd_version = LUSTRE_VERSION_CODE;
358
359         if (sb->s_flags & SB_RDONLY)
360                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
361         if (sbi->ll_flags & LL_SBI_USER_XATTR)
362                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
363
364 #ifdef SB_NOSEC
365         /* Setting this indicates we correctly support S_NOSEC (See kernel
366          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
367          */
368         sb->s_flags |= SB_NOSEC;
369 #endif
370         sbi->ll_fop = ll_select_file_operations(sbi);
371
372         /* always ping even if server suppress_pings */
373         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
374                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
375
376         obd_connect_set_secctx(data);
377         if (ll_sbi_has_encrypt(sbi))
378                 obd_connect_set_enc(data);
379
380 #if defined(CONFIG_SECURITY)
381         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
382 #endif
383
384         data->ocd_brw_size = MD_MAX_BRW_SIZE;
385
386         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
387                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
388         if (err == -EBUSY) {
389                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
390                                    "recovery, of which this client is not a "
391                                    "part. Please wait for recovery to complete,"
392                                    " abort, or time out.\n", md);
393                 GOTO(out, err);
394         } else if (err) {
395                 CERROR("cannot connect to %s: rc = %d\n", md, err);
396                 GOTO(out, err);
397         }
398
399         sbi->ll_md_exp->exp_connect_data = *data;
400
401         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
402                            LUSTRE_SEQ_METADATA);
403         if (err) {
404                 CERROR("%s: Can't init metadata layer FID infrastructure, "
405                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
406                 GOTO(out_md, err);
407         }
408
409         /* For mount, we only need fs info from MDT0; in DNE this also
410          * ensures the client can be mounted as long as MDT0 is
411          * available. */
412         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
413                         ktime_get_seconds() - sbi->ll_statfs_max_age,
414                         OBD_STATFS_FOR_MDT0);
415         if (err)
416                 GOTO(out_md_fid, err);
417
418         /* This needs to be after statfs to ensure connect has finished.
419          * Note that "data" does NOT contain the valid connect reply.
420          * If connecting to a 1.8 server there will be no LMV device, so
421          * we can access the MDC export directly and exp_connect_flags will
422          * be non-zero, but if accessing an upgraded 2.1 server it will
423          * have the correct flags filled in.
424          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
425         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
426         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
427             valid != CLIENT_CONNECT_MDT_REQD) {
428                 char *buf;
429
430                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
431                 obd_connect_flags2str(buf, PAGE_SIZE,
432                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
433                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
434                                    "feature(s) needed for correct operation "
435                                    "of this client (%s). Please upgrade "
436                                    "server or downgrade client.\n",
437                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
438                 OBD_FREE(buf, PAGE_SIZE);
439                 GOTO(out_md_fid, err = -EPROTO);
440         }
441
442         size = sizeof(*data);
443         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
444                            KEY_CONN_DATA,  &size, data);
445         if (err) {
446                 CERROR("%s: Get connect data failed: rc = %d\n",
447                        sbi->ll_md_exp->exp_obd->obd_name, err);
448                 GOTO(out_md_fid, err);
449         }
450
451         LASSERT(osfs->os_bsize);
452         sb->s_blocksize = osfs->os_bsize;
453         sb->s_blocksize_bits = log2(osfs->os_bsize);
454         sb->s_magic = LL_SUPER_MAGIC;
455         sb->s_maxbytes = MAX_LFS_FILESIZE;
456         sbi->ll_namelen = osfs->os_namelen;
457         sbi->ll_mnt.mnt = current->fs->root.mnt;
458
459         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
460             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
461                 LCONSOLE_INFO("Disabling user_xattr feature because "
462                               "it is not supported on the server\n");
463                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
464         }
465
466         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
467 #ifdef SB_POSIXACL
468                 sb->s_flags |= SB_POSIXACL;
469 #endif
470                 sbi->ll_flags |= LL_SBI_ACL;
471         } else {
472                 LCONSOLE_INFO("client wants to enable acl, but mdt does not!\n");
473 #ifdef SB_POSIXACL
474                 sb->s_flags &= ~SB_POSIXACL;
475 #endif
476                 sbi->ll_flags &= ~LL_SBI_ACL;
477         }
478
479         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
480                 sbi->ll_flags |= LL_SBI_64BIT_HASH;
481
482         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
483                 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
484
485         if (obd_connect_has_secctx(data))
486                 sbi->ll_flags |= LL_SBI_FILE_SECCTX;
487
488         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
489                 if (ll_sbi_has_test_dummy_encryption(sbi))
490                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
491                                       sbi->ll_fsname,
492                                       sbi->ll_md_exp->exp_obd->obd_name);
493                 ll_sbi_set_encrypt(sbi, false);
494         }
495
496         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
497                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
498                         LCONSOLE_INFO("%s: disabling xattr cache due to "
499                                       "unknown maximum xattr size.\n", dt);
500                 } else if (!sbi->ll_xattr_cache_set) {
501                         /* If xattr_cache was already set (whether to 0 or 1)
502                          * while processing the llog, it is not enabled here. */
503                         sbi->ll_flags |= LL_SBI_XATTR_CACHE;
504                         sbi->ll_xattr_cache_enabled = 1;
505                 }
506         }
507
508         sbi->ll_dt_obd = class_name2obd(dt);
509         if (!sbi->ll_dt_obd) {
510                 CERROR("DT %s: not setup or attached\n", dt);
511                 GOTO(out_md_fid, err = -ENODEV);
512         }
513
514         /* pass the client page size via ocd_grant_blkbits; the server should
515          * report back its backend blocksize for grant calculation purposes */
516         data->ocd_grant_blkbits = PAGE_SHIFT;
517
518         /* indicate OST features supported by this client */
519         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
520                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
521                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
522                                   OBD_CONNECT_SRVLOCK |
523                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
524                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
525                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
526                                   OBD_CONNECT_EINPROGRESS |
527                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
528                                   OBD_CONNECT_LAYOUTLOCK |
529                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
530                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
531                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
532         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
533                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK |
534                                    OBD_CONNECT2_REP_MBITS;
535
536         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
537                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
538
539         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
540          * disabled by default, because it can still be enabled on the
541          * fly via /sys. As a consequence, we still need to come to an
542          * agreement on the supported algorithms at connect time
543          */
544         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
545
546         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
547                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
548         else
549                 data->ocd_cksum_types = obd_cksum_types_supported_client();
550
551 #ifdef HAVE_LRU_RESIZE_SUPPORT
552         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
553 #endif
554         /* always ping even if server suppress_pings */
555         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
556                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
557
558         if (ll_sbi_has_encrypt(sbi))
559                 obd_connect_set_enc(data);
560
561         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
562                "ocd_grant: %d\n", data->ocd_connect_flags,
563                data->ocd_version, data->ocd_grant);
564
565         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
566         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
567
568         data->ocd_brw_size = DT_MAX_BRW_SIZE;
569
570         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
571                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
572         if (err == -EBUSY) {
573                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
574                                    "recovery, of which this client is not a "
575                                    "part.  Please wait for recovery to "
576                                    "complete, abort, or time out.\n", dt);
577                 GOTO(out_md, err);
578         } else if (err) {
579                 CERROR("%s: Cannot connect to %s: rc = %d\n",
580                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
581                 GOTO(out_md, err);
582         }
583
584         if (ll_sbi_has_encrypt(sbi) &&
585             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
586                 if (ll_sbi_has_test_dummy_encryption(sbi))
587                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
588                                       sbi->ll_fsname, dt);
589                 ll_sbi_set_encrypt(sbi, false);
590         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
591                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
592         }
593
594         sbi->ll_dt_exp->exp_connect_data = *data;
595
596         /* Don't change value if it was specified in the config log */
597         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
598                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
599                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
600                               (data->ocd_brw_size >> PAGE_SHIFT));
601                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
602                     sbi->ll_ra_info.ra_max_pages_per_file)
603                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
604                                 sbi->ll_ra_info.ra_max_pages_per_file;
605         }
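        /* The default is thus at least SBI_DEFAULT_READ_AHEAD_WHOLE_MAX and at
         * least one full RPC (ocd_brw_size) worth of pages, but never more
         * than ra_max_pages_per_file. */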
606
607         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
608                            LUSTRE_SEQ_METADATA);
609         if (err) {
610                 CERROR("%s: Can't init data layer FID infrastructure, "
611                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
612                 GOTO(out_dt, err);
613         }
614
615         mutex_lock(&sbi->ll_lco.lco_lock);
616         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
617         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
618         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
619         mutex_unlock(&sbi->ll_lco.lco_lock);
620
621         fid_zero(&sbi->ll_root_fid);
622         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
623                            &sbi->ll_root_fid);
624         if (err) {
625                 CERROR("cannot get the root fid: rc = %d\n", err);
626                 GOTO(out_lock_cn_cb, err);
627         }
628         if (!fid_is_sane(&sbi->ll_root_fid)) {
629                 CERROR("%s: Invalid root fid "DFID" during mount\n",
630                        sbi->ll_md_exp->exp_obd->obd_name,
631                        PFID(&sbi->ll_root_fid));
632                 GOTO(out_lock_cn_cb, err = -EINVAL);
633         }
634         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
635
636         sb->s_op = &lustre_super_operations;
637         sb->s_xattr = ll_xattr_handlers;
638 #if THREAD_SIZE >= 8192 /*b=17630*/
639         sb->s_export_op = &lustre_export_operations;
640 #endif
641 #ifdef HAVE_LUSTRE_CRYPTO
642         llcrypt_set_ops(sb, &lustre_cryptops);
643 #endif
644
645         /* make root inode
646          * XXX: move this to after cbd setup? */
647         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
648         if (sbi->ll_flags & LL_SBI_ACL)
649                 valid |= OBD_MD_FLACL;
650
651         OBD_ALLOC_PTR(op_data);
652         if (op_data == NULL)
653                 GOTO(out_lock_cn_cb, err = -ENOMEM);
654
655         op_data->op_fid1 = sbi->ll_root_fid;
656         op_data->op_mode = 0;
657         op_data->op_valid = valid;
658
659         err = md_getattr(sbi->ll_md_exp, op_data, &request);
660
661         OBD_FREE_PTR(op_data);
662         if (err) {
663                 CERROR("%s: md_getattr failed for root: rc = %d\n",
664                        sbi->ll_md_exp->exp_obd->obd_name, err);
665                 GOTO(out_lock_cn_cb, err);
666         }
667
668         err = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill,
669                                sbi->ll_dt_exp, sbi->ll_md_exp, &lmd);
670         if (err) {
671                 CERROR("failed to understand root inode md: rc = %d\n", err);
672                 ptlrpc_req_finished(request);
673                 GOTO(out_lock_cn_cb, err);
674         }
675
676         LASSERT(fid_is_sane(&sbi->ll_root_fid));
677         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
678                                             sbi->ll_flags & LL_SBI_32BIT_API),
679                        &lmd);
680         md_free_lustre_md(sbi->ll_md_exp, &lmd);
681         ptlrpc_req_finished(request);
682
683         if (IS_ERR(root)) {
684                 lmd_clear_acl(&lmd);
685                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
686                 root = NULL;
687                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
688                        sbi->ll_fsname, err);
689                 GOTO(out_root, err);
690         }
691
692         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
693         if (sbi->ll_checksum_set) {
694                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
695                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
696                                          sizeof(checksum), &checksum, NULL);
697                 if (err) {
698                         CERROR("%s: Set checksum failed: rc = %d\n",
699                                sbi->ll_dt_exp->exp_obd->obd_name, err);
700                         GOTO(out_root, err);
701                 }
702         }
703         cl_sb_init(sb);
704
705         sb->s_root = d_make_root(root);
706         if (sb->s_root == NULL) {
707                 err = -ENOMEM;
708                 CERROR("%s: can't make root dentry: rc = %d\n",
709                        sbi->ll_fsname, err);
710                 GOTO(out_root, err);
711         }
712
713         sbi->ll_sdev_orig = sb->s_dev;
714
715         /* We set sb->s_dev equal on all lustre clients in order to support
716          * NFS export clustering.  NFSD requires that the FSID be the same
717          * on all clients. */
718         /* s_dev is also used in lt_compare() to compare two fs, but that is
719          * only a node-local comparison. */
720         uuid = obd_get_uuid(sbi->ll_md_exp);
721         if (uuid != NULL)
722                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
723
724         if (data != NULL)
725                 OBD_FREE_PTR(data);
726         if (osfs != NULL)
727                 OBD_FREE_PTR(osfs);
728
729         if (sbi->ll_dt_obd) {
730                 err = sysfs_create_link(&sbi->ll_kset.kobj,
731                                         &sbi->ll_dt_obd->obd_kset.kobj,
732                                         sbi->ll_dt_obd->obd_type->typ_name);
733                 if (err < 0) {
734                         CERROR("%s: could not register %s in llite: rc = %d\n",
735                                dt, sbi->ll_fsname, err);
736                         err = 0;
737                 }
738         }
739
740         if (sbi->ll_md_obd) {
741                 err = sysfs_create_link(&sbi->ll_kset.kobj,
742                                         &sbi->ll_md_obd->obd_kset.kobj,
743                                         sbi->ll_md_obd->obd_type->typ_name);
744                 if (err < 0) {
745                         CERROR("%s: could not register %s in llite: rc = %d\n",
746                                md, sbi->ll_fsname, err);
747                         err = 0;
748                 }
749         }
750
751         RETURN(err);
752 out_root:
753         iput(root);
754 out_lock_cn_cb:
755         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
756 out_dt:
757         obd_disconnect(sbi->ll_dt_exp);
758         sbi->ll_dt_exp = NULL;
759         sbi->ll_dt_obd = NULL;
760 out_md_fid:
761         obd_fid_fini(sbi->ll_md_exp->exp_obd);
762 out_md:
763         obd_disconnect(sbi->ll_md_exp);
764         sbi->ll_md_exp = NULL;
765         sbi->ll_md_obd = NULL;
766 out:
767         if (data != NULL)
768                 OBD_FREE_PTR(data);
769         if (osfs != NULL)
770                 OBD_FREE_PTR(osfs);
771         return err;
772 }
773
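/**
 * Get the maximum supported EA size.
 *
 * Query KEY_MAX_EASIZE from the data (LOV) export and then from the
 * metadata (LMV/MDC) export; the value from the metadata export is what
 * is finally stored in \a lmmsize.
 *
 * \param[in] sbi       superblock info for this filesystem
 * \param[out] lmmsize  pointer to storage location for value
 *
 * \retval 0            on success
 * \retval negative     negated errno on failure
 */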
774 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
775 {
776         int size, rc;
777
778         size = sizeof(*lmmsize);
779         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
780                           KEY_MAX_EASIZE, &size, lmmsize);
781         if (rc != 0) {
782                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
783                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
784                 RETURN(rc);
785         }
786
787         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
788
789         size = sizeof(int);
790         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
791                           KEY_MAX_EASIZE, &size, lmmsize);
792         if (rc)
793                 CERROR("Get max mdsize error rc %d\n", rc);
794
795         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
796
797         RETURN(rc);
798 }
799
800 /**
801  * Get the value of the default_easize parameter.
802  *
803  * \see client_obd::cl_default_mds_easize
804  *
805  * \param[in] sbi       superblock info for this filesystem
806  * \param[out] lmmsize  pointer to storage location for value
807  *
808  * \retval 0            on success
809  * \retval negative     negated errno on failure
810  */
811 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
812 {
813         int size, rc;
814
815         size = sizeof(int);
816         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
817                          KEY_DEFAULT_EASIZE, &size, lmmsize);
818         if (rc)
819                 CERROR("Get default mdsize error rc %d\n", rc);
820
821         RETURN(rc);
822 }
823
824 /**
825  * Set the default_easize parameter to the given value.
826  *
827  * \see client_obd::cl_default_mds_easize
828  *
829  * \param[in] sbi       superblock info for this filesystem
830  * \param[in] lmmsize   the size to set
831  *
832  * \retval 0            on success
833  * \retval negative     negated errno on failure
834  */
835 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
836 {
837         int rc;
838
839         if (lmmsize < sizeof(struct lov_mds_md) ||
840             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
841                 return -EINVAL;
842
843         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
844                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
845                                 sizeof(int), &lmmsize, NULL);
846
847         RETURN(rc);
848 }
849
850 static void client_common_put_super(struct super_block *sb)
851 {
852         struct ll_sb_info *sbi = ll_s2sbi(sb);
853         ENTRY;
854
855         cl_sb_fini(sb);
856
857         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
858         obd_disconnect(sbi->ll_dt_exp);
859         sbi->ll_dt_exp = NULL;
860
861         ll_debugfs_unregister_super(sb);
862
863         obd_fid_fini(sbi->ll_md_exp->exp_obd);
864         obd_disconnect(sbi->ll_md_exp);
865         sbi->ll_md_exp = NULL;
866
867         EXIT;
868 }
869
870 void ll_kill_super(struct super_block *sb)
871 {
872         struct ll_sb_info *sbi;
873         ENTRY;
874
875         /* sb not initialized? */
876         if (!(sb->s_flags & SB_ACTIVE))
877                 return;
878
879         sbi = ll_s2sbi(sb);
880         /* we need to restore s_dev, which was changed for clustered NFS,
881          * before put_super because newer kernels cache s_dev and changing
882          * sb->s_dev in put_super does not affect the real device removal */
883         if (sbi) {
884                 sb->s_dev = sbi->ll_sdev_orig;
885
886                 /* wait for running statahead threads to quit */
887                 while (atomic_read(&sbi->ll_sa_running) > 0)
888                         schedule_timeout_uninterruptible(
889                                 cfs_time_seconds(1) >> 3);
890         }
891
892         EXIT;
893 }
894
895 static inline int ll_set_opt(const char *opt, char *data, int fl)
896 {
897         if (strncmp(opt, data, strlen(opt)) != 0)
898                 return 0;
899         else
900                 return fl;
901 }
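/*
 * Illustrative example (not part of the parser): for the mount options
 * string "flock,nouser_xattr", ll_options() below matches each
 * comma-separated token with ll_set_opt(), e.g.
 *
 *        tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
 *        *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp;
 *
 * for the first token, and clears LL_SBI_USER_XATTR for the second.
 */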
902
903 /* non-client-specific mount options are parsed in lmd_parse */
904 static int ll_options(char *options, struct ll_sb_info *sbi)
905 {
906         int tmp;
907         char *s1 = options, *s2;
908         int *flags = &sbi->ll_flags;
909         ENTRY;
910
911         if (!options)
912                 RETURN(0);
913
914         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
915
916         while (*s1) {
917                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
918                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
919                 if (tmp) {
920                         *flags |= tmp;
921                         goto next;
922                 }
923                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
924                 if (tmp) {
925                         *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp;
926                         goto next;
927                 }
928                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
929                 if (tmp) {
930                         *flags = (*flags & ~LL_SBI_FLOCK) | tmp;
931                         goto next;
932                 }
933                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
934                 if (tmp) {
935                         *flags &= ~tmp;
936                         goto next;
937                 }
938                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
939                 if (tmp) {
940                         *flags |= tmp;
941                         goto next;
942                 }
943                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
944                 if (tmp) {
945                         *flags &= ~tmp;
946                         goto next;
947                 }
948                 tmp = ll_set_opt("context", s1, 1);
949                 if (tmp)
950                         goto next;
951                 tmp = ll_set_opt("fscontext", s1, 1);
952                 if (tmp)
953                         goto next;
954                 tmp = ll_set_opt("defcontext", s1, 1);
955                 if (tmp)
956                         goto next;
957                 tmp = ll_set_opt("rootcontext", s1, 1);
958                 if (tmp)
959                         goto next;
960                 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
961                 if (tmp) {
962                         *flags |= tmp;
963                         goto next;
964                 }
965                 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
966                 if (tmp) {
967                         *flags &= ~tmp;
968                         goto next;
969                 }
970
971                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
972                 if (tmp) {
973                         *flags |= tmp;
974                         sbi->ll_checksum_set = 1;
975                         goto next;
976                 }
977                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
978                 if (tmp) {
979                         *flags &= ~tmp;
980                         sbi->ll_checksum_set = 1;
981                         goto next;
982                 }
983                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
984                 if (tmp) {
985                         *flags |= tmp;
986                         goto next;
987                 }
988                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
989                 if (tmp) {
990                         *flags &= ~tmp;
991                         goto next;
992                 }
993                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
994                 if (tmp) {
995                         *flags |= tmp;
996                         goto next;
997                 }
998                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
999                 if (tmp) {
1000                         *flags &= ~tmp;
1001                         goto next;
1002                 }
1003                 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
1004                 if (tmp) {
1005                         *flags |= tmp;
1006                         goto next;
1007                 }
1008                 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
1009                 if (tmp) {
1010                         *flags |= tmp;
1011                         goto next;
1012                 }
1013                 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
1014                 if (tmp) {
1015                         *flags &= ~tmp;
1016                         goto next;
1017                 }
1018                 tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
1019                 if (tmp) {
1020                         *flags |= tmp;
1021                         goto next;
1022                 }
1023                 tmp = ll_set_opt("test_dummy_encryption", s1,
1024                                  LL_SBI_TEST_DUMMY_ENCRYPTION);
1025                 if (tmp) {
1026 #ifdef HAVE_LUSTRE_CRYPTO
1027                         *flags |= tmp;
1028 #else
1029                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1030 #endif
1031                         goto next;
1032                 }
1033                 tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT);
1034                 if (tmp) {
1035 #ifdef HAVE_LUSTRE_CRYPTO
1036                         *flags &= ~tmp;
1037 #else
1038                         LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n");
1039 #endif
1040                         goto next;
1041                 }
1042                 tmp = ll_set_opt("foreign_symlink", s1, LL_SBI_FOREIGN_SYMLINK);
1043                 if (tmp) {
1044                         int prefix_pos = sizeof("foreign_symlink=") - 1;
1045                         int equal_pos = sizeof("foreign_symlink=") - 2;
1046
1047                         /* non-default prefix provided ? */
1048                         if (strlen(s1) >= sizeof("foreign_symlink=") &&
1049                             *(s1 + equal_pos) == '=') {
1050                                 char *old = sbi->ll_foreign_symlink_prefix;
1051                                 size_t old_len =
1052                                         sbi->ll_foreign_symlink_prefix_size;
1053
1054                                 /* path must be absolute */
1055                                 if (*(s1 + sizeof("foreign_symlink=")
1056                                       - 1) != '/') {
1057                                         LCONSOLE_ERROR_MSG(0x152,
1058                                                 "foreign prefix '%s' must be an absolute path\n",
1059                                                 s1 + prefix_pos);
1060                                         RETURN(-EINVAL);
1061                                 }
1062                                 /* last option ? */
1063                                 s2 = strchrnul(s1 + prefix_pos, ',');
1064
1065                                 if (sbi->ll_foreign_symlink_prefix) {
1066                                         sbi->ll_foreign_symlink_prefix = NULL;
1067                                         sbi->ll_foreign_symlink_prefix_size = 0;
1068                                 }
1069                                 /* alloc for path length and '\0' */
1070                                 OBD_ALLOC(sbi->ll_foreign_symlink_prefix,
1071                                                 s2 - (s1 + prefix_pos) + 1);
1072                                 if (!sbi->ll_foreign_symlink_prefix) {
1073                                         /* restore previous */
1074                                         sbi->ll_foreign_symlink_prefix = old;
1075                                         sbi->ll_foreign_symlink_prefix_size =
1076                                                 old_len;
1077                                         RETURN(-ENOMEM);
1078                                 }
1079                                 if (old)
1080                                         OBD_FREE(old, old_len);
1081                                 strncpy(sbi->ll_foreign_symlink_prefix,
1082                                         s1 + prefix_pos,
1083                                         s2 - (s1 + prefix_pos));
1084                                 sbi->ll_foreign_symlink_prefix_size =
1085                                         s2 - (s1 + prefix_pos) + 1;
1086                         } else {
1087                                 LCONSOLE_ERROR_MSG(0x152,
1088                                                    "invalid %s option\n", s1);
1089                         }
1090                         /* enable foreign symlink support */
1091                         *flags |= tmp;
1092                         goto next;
1093                 }
1094                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
1095                                    s1);
1096                 RETURN(-EINVAL);
1097
1098 next:
1099                 /* Find next opt */
1100                 s2 = strchr(s1, ',');
1101                 if (s2 == NULL)
1102                         break;
1103                 s1 = s2 + 1;
1104         }
1105         RETURN(0);
1106 }
1107
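/*
 * Initialize the llite-private part of a freshly allocated inode.
 * Directories get statahead state; regular files and other non-directories
 * get size/setattr serialization, glimpse, file heat and PCC state.
 */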
1108 void ll_lli_init(struct ll_inode_info *lli)
1109 {
1110         lli->lli_inode_magic = LLI_INODE_MAGIC;
1111         lli->lli_flags = 0;
1112         rwlock_init(&lli->lli_lock);
1113         lli->lli_posix_acl = NULL;
1114         /* Do not set lli_fid, it has been initialized already. */
1115         fid_zero(&lli->lli_pfid);
1116         lli->lli_mds_read_och = NULL;
1117         lli->lli_mds_write_och = NULL;
1118         lli->lli_mds_exec_och = NULL;
1119         lli->lli_open_fd_read_count = 0;
1120         lli->lli_open_fd_write_count = 0;
1121         lli->lli_open_fd_exec_count = 0;
1122         mutex_init(&lli->lli_och_mutex);
1123         spin_lock_init(&lli->lli_agl_lock);
1124         spin_lock_init(&lli->lli_layout_lock);
1125         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1126         lli->lli_clob = NULL;
1127
1128         init_rwsem(&lli->lli_xattrs_list_rwsem);
1129         mutex_init(&lli->lli_xattrs_enq_lock);
1130
1131         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1132         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1133                 lli->lli_opendir_key = NULL;
1134                 lli->lli_sai = NULL;
1135                 spin_lock_init(&lli->lli_sa_lock);
1136                 lli->lli_opendir_pid = 0;
1137                 lli->lli_sa_enabled = 0;
1138                 init_rwsem(&lli->lli_lsm_sem);
1139         } else {
1140                 mutex_init(&lli->lli_size_mutex);
1141                 mutex_init(&lli->lli_setattr_mutex);
1142                 lli->lli_symlink_name = NULL;
1143                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1144                 range_lock_tree_init(&lli->lli_write_tree);
1145                 init_rwsem(&lli->lli_glimpse_sem);
1146                 lli->lli_glimpse_time = ktime_set(0, 0);
1147                 INIT_LIST_HEAD(&lli->lli_agl_list);
1148                 lli->lli_agl_index = 0;
1149                 lli->lli_async_rc = 0;
1150                 spin_lock_init(&lli->lli_heat_lock);
1151                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1152                 lli->lli_heat_flags = 0;
1153                 mutex_init(&lli->lli_pcc_lock);
1154                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1155                 lli->lli_pcc_inode = NULL;
1156                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1157                 lli->lli_pcc_generation = 0;
1158                 mutex_init(&lli->lli_group_mutex);
1159                 lli->lli_group_users = 0;
1160                 lli->lli_group_gid = 0;
1161         }
1162         mutex_init(&lli->lli_layout_mutex);
1163         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1164         /* ll_cl_context initialize */
1165         INIT_LIST_HEAD(&lli->lli_lccs);
1166 }
1167
1168 #define MAX_STRING_SIZE 128
1169
1170 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1171
1172 #define LSI_BDI_INITIALIZED     0x00400000
1173
1174 #ifndef HAVE_BDI_CAP_MAP_COPY
1175 # define BDI_CAP_MAP_COPY       0
1176 #endif
1177
1178 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1179 {
1180         struct  lustre_sb_info *lsi = s2lsi(sb);
1181         char buf[MAX_STRING_SIZE];
1182         va_list args;
1183         int err;
1184
1185         err = bdi_init(&lsi->lsi_bdi);
1186         if (err)
1187                 return err;
1188
1189         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1190         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1191         lsi->lsi_bdi.name = "lustre";
1192         va_start(args, fmt);
1193         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1194         va_end(args);
1195         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1197         if (!err)
1198                 sb->s_bdi = &lsi->lsi_bdi;
1199
1200         return err;
1201 }
1202 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1203
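/*
 * Client mount entry point: allocate the ll_sb_info, parse llite mount
 * options, process the client configuration llog from the MGS and then call
 * client_common_fill_super() to set up the MDC/OSC connections and the root
 * inode.
 */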
1204 int ll_fill_super(struct super_block *sb)
1205 {
1206         struct  lustre_profile *lprof = NULL;
1207         struct  lustre_sb_info *lsi = s2lsi(sb);
1208         struct  ll_sb_info *sbi = NULL;
1209         char    *dt = NULL, *md = NULL;
1210         char    *profilenm = get_profile_name(sb);
1211         struct config_llog_instance *cfg;
1212         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1213         const int instlen = LUSTRE_MAXINSTANCE + 2;
1214         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1215         char name[MAX_STRING_SIZE];
1216         int md_len = 0;
1217         int dt_len = 0;
1218         uuid_t uuid;
1219         char *ptr;
1220         int len;
1221         int err;
1222
1223         ENTRY;
1224         /* for ASLR, to map between cfg_instance and hashed ptr */
1225         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1226                profilenm, cfg_instance, sb);
1227
1228         OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
1229
1230         OBD_ALLOC_PTR(cfg);
1231         if (cfg == NULL)
1232                 GOTO(out_free_cfg, err = -ENOMEM);
1233
1234         /* client additional sb info */
1235         lsi->lsi_llsbi = sbi = ll_init_sbi();
1236         if (IS_ERR(sbi))
1237                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1238
1239         err = ll_options(lsi->lsi_lmd->lmd_opts, sbi);
1240         if (err)
1241                 GOTO(out_free_cfg, err);
1242
1243         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1244         sb->s_d_op = &ll_d_ops;
1245
1246         /* UUID handling */
1247         generate_random_uuid(uuid.b);
1248         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1249
1250         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1251
1252         /* Get fsname */
1253         len = strlen(profilenm);
1254         ptr = strrchr(profilenm, '-');
1255         if (ptr && (strcmp(ptr, "-client") == 0))
1256                 len -= 7;
1257
1258         if (len > LUSTRE_MAXFSNAME) {
1259                 if (unlikely(len >= MAX_STRING_SIZE))
1260                         len = MAX_STRING_SIZE - 1;
1261                 strncpy(name, profilenm, len);
1262                 name[len] = '\0';
1263                 err = -ENAMETOOLONG;
1264                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1265                        name, LUSTRE_MAXFSNAME, err);
1266                 GOTO(out_free_cfg, err);
1267         }
1268         strncpy(sbi->ll_fsname, profilenm, len);
1269         sbi->ll_fsname[len] = '\0';
1270
1271         /* Mount info */
1272         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1273                  profilenm, cfg_instance);
1274
1275         err = super_setup_bdi_name(sb, "%s", name);
1276         if (err)
1277                 GOTO(out_free_cfg, err);
1278
1279         /* Call ll_debugfs_register_super() before lustre_process_log()
1280          * so that "llite.*.*" params can be processed correctly.
1281          */
1282         err = ll_debugfs_register_super(sb, name);
1283         if (err < 0) {
1284                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1285                        sbi->ll_fsname, err);
1286                 err = 0;
1287         }
1288
1289         /* The cfg_instance is a value unique to this super, in case some
1290          * joker tries to mount the same fs at two mount points.
1291          */
1292         cfg->cfg_instance = cfg_instance;
1293         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1294         cfg->cfg_callback = class_config_llog_handler;
1295         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1296         /* set up client obds */
1297         err = lustre_process_log(sb, profilenm, cfg);
1298         if (err < 0)
1299                 GOTO(out_debugfs, err);
1300
1301         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1302         lprof = class_get_profile(profilenm);
1303         if (lprof == NULL) {
1304                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1305                                    " read from the MGS.  Does that filesystem "
1306                                    "exist?\n", profilenm);
1307                 GOTO(out_debugfs, err = -EINVAL);
1308         }
1309         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1310                lprof->lp_md, lprof->lp_dt);
1311
1312         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1313         OBD_ALLOC(dt, dt_len);
1314         if (!dt)
1315                 GOTO(out_profile, err = -ENOMEM);
1316         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1317
1318         md_len = strlen(lprof->lp_md) + instlen + 2;
1319         OBD_ALLOC(md, md_len);
1320         if (!md)
1321                 GOTO(out_free_dt, err = -ENOMEM);
1322         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1323
1324         /* connections, registrations, sb setup */
1325         err = client_common_fill_super(sb, md, dt);
1326         if (err < 0)
1327                 GOTO(out_free_md, err);
1328
1329         sbi->ll_client_common_fill_super_succeeded = 1;
1330
1331 out_free_md:
1332         if (md)
1333                 OBD_FREE(md, md_len);
1334 out_free_dt:
1335         if (dt)
1336                 OBD_FREE(dt, dt_len);
1337 out_profile:
1338         if (lprof)
1339                 class_put_profile(lprof);
1340 out_debugfs:
1341         if (err < 0)
1342                 ll_debugfs_unregister_super(sb);
1343 out_free_cfg:
1344         if (cfg)
1345                 OBD_FREE_PTR(cfg);
1346
1347         if (err)
1348                 ll_put_super(sb);
1349         else if (sbi->ll_flags & LL_SBI_VERBOSE)
1350                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1351         RETURN(err);
1352 } /* ll_fill_super */
1353
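/* Tear down the llite mount: end the client and params config logs, wait
 * for unstable pages to be committed unless the disconnect is forced,
 * propagate obd_force to every device in this superblock's UUID group,
 * then clean those devices up and release the sbi.
 */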
1354 void ll_put_super(struct super_block *sb)
1355 {
1356         struct config_llog_instance cfg, params_cfg;
1357         struct obd_device *obd;
1358         struct lustre_sb_info *lsi = s2lsi(sb);
1359         struct ll_sb_info *sbi = ll_s2sbi(sb);
1360         char *profilenm = get_profile_name(sb);
1361         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1362         long ccc_count;
1363         int next, force = 1, rc = 0;
1364         ENTRY;
1365
1366         if (IS_ERR(sbi))
1367                 GOTO(out_no_sbi, 0);
1368
1369         /* Should replace instance_id with something better for ASLR */
1370         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1371                profilenm, cfg_instance, sb);
1372
1373         cfg.cfg_instance = cfg_instance;
1374         lustre_end_log(sb, profilenm, &cfg);
1375
1376         params_cfg.cfg_instance = cfg_instance;
1377         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1378
1379         if (sbi->ll_md_exp) {
1380                 obd = class_exp2obd(sbi->ll_md_exp);
1381                 if (obd)
1382                         force = obd->obd_force;
1383         }
1384
1385         /* Wait for unstable pages to be committed to stable storage */
1386         if (force == 0) {
1387                 rc = l_wait_event_abortable(
1388                         sbi->ll_cache->ccc_unstable_waitq,
1389                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1390         }
1391
1392         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1393         if (force == 0 && rc != -ERESTARTSYS)
1394                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1395
1396         /* We need to set force before the lov_disconnect in
1397          * lustre_common_put_super, since lov_disconnect cleans up the OSCs too.
1398          */
1399         if (force) {
1400                 next = 0;
1401                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1402                                                      &next)) != NULL) {
1403                         obd->obd_force = force;
1404                 }
1405         }
1406
1407         if (sbi->ll_client_common_fill_super_succeeded) {
1408                 /* Only if client_common_fill_super succeeded */
1409                 client_common_put_super(sb);
1410         }
1411
1412         next = 0;
1413         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1414                 class_manual_cleanup(obd);
1415
1416         if (sbi->ll_flags & LL_SBI_VERBOSE)
1417                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1418
1419         if (profilenm)
1420                 class_del_profile(profilenm);
1421
1422 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1423         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1424                 bdi_destroy(&lsi->lsi_bdi);
1425                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1426         }
1427 #endif
1428
1429         ll_free_sbi(sb);
1430         lsi->lsi_llsbi = NULL;
1431 out_no_sbi:
1432         lustre_common_put_super(sb);
1433
1434         cl_env_cache_purge(~0);
1435
1436         EXIT;
1437 } /* ll_put_super */
1438
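/* Return an igrab()ed inode cached in the lock resource's LVB, or NULL if
 * none is set or its magic indicates the cached pointer is stale.
 */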
1439 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1440 {
1441         struct inode *inode = NULL;
1442
1443         /* NOTE: we depend on atomic igrab() -bzzz */
1444         lock_res_and_lock(lock);
1445         if (lock->l_resource->lr_lvb_inode) {
1446                 struct ll_inode_info *lli;
1447                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1448                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1449                         inode = igrab(lock->l_resource->lr_lvb_inode);
1450                 } else {
1451                         inode = lock->l_resource->lr_lvb_inode;
1452                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1453                                          D_WARNING, lock, "lr_lvb_inode %p is "
1454                                          "bogus: magic %08x",
1455                                          lock->l_resource->lr_lvb_inode,
1456                                          lli->lli_inode_magic);
1457                         inode = NULL;
1458                 }
1459         }
1460         unlock_res_and_lock(lock);
1461         return inode;
1462 }
1463
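/* Release the striped and default LMV layouts cached on a directory inode. */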
1464 void ll_dir_clear_lsm_md(struct inode *inode)
1465 {
1466         struct ll_inode_info *lli = ll_i2info(inode);
1467
1468         LASSERT(S_ISDIR(inode->i_mode));
1469
1470         if (lli->lli_lsm_md) {
1471                 lmv_free_memmd(lli->lli_lsm_md);
1472                 lli->lli_lsm_md = NULL;
1473         }
1474
1475         if (lli->lli_default_lsm_md) {
1476                 lmv_free_memmd(lli->lli_default_lsm_md);
1477                 lli->lli_default_lsm_md = NULL;
1478         }
1479 }
1480
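/* Get or create the anonymous inode backing one stripe of a striped
 * directory. A new inode gets the directory ops, the slave FID and the
 * master FID as its pfid; an existing inode only has its pfid refreshed
 * (directory restripe/auto-split case).
 */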
1481 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1482                                       const struct lu_fid *fid,
1483                                       struct lustre_md *md)
1484 {
1485         struct ll_sb_info *sbi = ll_s2sbi(sb);
1486         struct ll_inode_info *lli;
1487         struct mdt_body *body = md->body;
1488         struct inode *inode;
1489         ino_t ino;
1490
1491         ENTRY;
1492
1493         LASSERT(md->lmv);
1494         ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1495         inode = iget_locked(sb, ino);
1496         if (inode == NULL) {
1497                 CERROR("%s: failed to get simple inode "DFID": rc = -ENOENT\n",
1498                        sbi->ll_fsname, PFID(fid));
1499                 RETURN(ERR_PTR(-ENOENT));
1500         }
1501
1502         lli = ll_i2info(inode);
1503         if (inode->i_state & I_NEW) {
1504                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1505                                 (body->mbo_mode & S_IFMT);
1506                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1507                          PFID(fid));
1508
1509                 inode->i_mtime.tv_sec = 0;
1510                 inode->i_atime.tv_sec = 0;
1511                 inode->i_ctime.tv_sec = 0;
1512                 inode->i_rdev = 0;
1513
1514 #ifdef HAVE_BACKING_DEV_INFO
1515                 /* initializing backing dev info. */
1516                 inode->i_mapping->backing_dev_info =
1517                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1518 #endif
1519                 inode->i_op = &ll_dir_inode_operations;
1520                 inode->i_fop = &ll_dir_operations;
1521                 lli->lli_fid = *fid;
1522                 ll_lli_init(lli);
1523
1524                 /* master object FID */
1525                 lli->lli_pfid = body->mbo_fid1;
1526                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1527                        lli, PFID(fid), PFID(&lli->lli_pfid));
1528                 unlock_new_inode(inode);
1529         } else {
1530                 /* During directory restripe/auto-split a plain directory may
1531                  * be transformed into a stripe; set its pfid here, otherwise
1532                  * ll_lock_cancel_bits() cannot find the master inode.
1533                  */
1534                 lli->lli_pfid = body->mbo_fid1;
1535         }
1536
1537         RETURN(inode);
1538 }
1539
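/* Attach the stripe layout in @md to the directory inode: for a striped
 * directory, instantiate an anonymous slave inode for each sane stripe FID
 * before publishing the lsm in lli_lsm_md.
 */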
1540 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1541 {
1542         struct lu_fid *fid;
1543         struct lmv_stripe_md *lsm = md->lmv;
1544         struct ll_inode_info *lli = ll_i2info(inode);
1545         int i;
1546
1547         LASSERT(lsm != NULL);
1548
1549         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1550                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1551         lsm_md_dump(D_INODE, lsm);
1552
1553         if (!lmv_dir_striped(lsm))
1554                 goto out;
1555
1556         /* XXX sigh, this lsm_root initialization should be in the
1557          * LMV layer, but it needs ll_iget, so keep it
1558          * here for now. */
1559         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1560                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1561                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1562
1563                 if (!fid_is_sane(fid))
1564                         continue;
1565
1566                 /* Unfortunately ll_iget will call ll_update_inode,
1567                  * where the initialization of a slave inode is slightly
1568                  * different, so it resets lsm_md to NULL to avoid
1569                  * initializing the lsm for slave inodes. */
1570                 lsm->lsm_md_oinfo[i].lmo_root =
1571                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1572                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1573                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1574
1575                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1576                         while (i-- > 0) {
1577                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1578                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1579                         }
1580                         return rc;
1581                 }
1582         }
1583 out:
1584         lli->lli_lsm_md = lsm;
1585
1586         return 0;
1587 }
1588
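/* Refresh the cached default LMV layout of a directory from @md, or clear
 * it if the reply carries none. The swap is done under lli_lsm_sem and
 * ownership of md->default_lmv moves to the inode.
 */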
1589 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1590 {
1591         struct ll_inode_info *lli = ll_i2info(inode);
1592
1593         if (!md->default_lmv) {
1594                 /* clear default lsm */
1595                 if (lli->lli_default_lsm_md) {
1596                         down_write(&lli->lli_lsm_sem);
1597                         if (lli->lli_default_lsm_md) {
1598                                 lmv_free_memmd(lli->lli_default_lsm_md);
1599                                 lli->lli_default_lsm_md = NULL;
1600                         }
1601                         up_write(&lli->lli_lsm_sem);
1602                 }
1603                 return;
1604         }
1605
1606         if (lli->lli_default_lsm_md) {
1607                 /* do nothing if the default lsm isn't changed */
1608                 down_read(&lli->lli_lsm_sem);
1609                 if (lli->lli_default_lsm_md &&
1610                     lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1611                         up_read(&lli->lli_lsm_sem);
1612                         return;
1613                 }
1614                 up_read(&lli->lli_lsm_sem);
1615         }
1616
1617         down_write(&lli->lli_lsm_sem);
1618         if (lli->lli_default_lsm_md)
1619                 lmv_free_memmd(lli->lli_default_lsm_md);
1620         lli->lli_default_lsm_md = md->default_lmv;
1621         lsm_md_dump(D_INODE, md->default_lmv);
1622         md->default_lmv = NULL;
1623         up_write(&lli->lli_lsm_sem);
1624 }
1625
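/* Reconcile the directory stripe layout cached in the inode with the one
 * in @md: keep it if unchanged, reject a mismatching layout whose version
 * was not bumped, otherwise replace it and fold the per-stripe attributes
 * (nlink, size, times) back into the reply body via md_merge_attr().
 */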
1626 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1627 {
1628         struct ll_inode_info *lli = ll_i2info(inode);
1629         struct lmv_stripe_md *lsm = md->lmv;
1630         struct cl_attr  *attr;
1631         int rc = 0;
1632
1633         ENTRY;
1634
1635         LASSERT(S_ISDIR(inode->i_mode));
1636         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1637                PFID(ll_inode2fid(inode)));
1638
1639         /* update default LMV */
1640         if (md->default_lmv)
1641                 ll_update_default_lsm_md(inode, md);
1642
1643         /* after dir migration/restripe, a stripe may be turned into a
1644          * directory, in this case, zero out its lli_pfid.
1645          */
1646         if (unlikely(fid_is_norm(&lli->lli_pfid)))
1647                 fid_zero(&lli->lli_pfid);
1648
1649         /*
1650          * no striping information from the request: the lustre_md from the
1651          * request does not include the stripe EA, see ll_md_setattr()
1652          */
1653         if (!lsm)
1654                 RETURN(0);
1655
1656         /*
1657          * normally the dir layout doesn't change, so only take the read lock
1658          * to check it, to avoid blocking other MD operations.
1659          */
1660         down_read(&lli->lli_lsm_sem);
1661
1662         /* a concurrent lookup initialized the lsm, and it is unchanged */
1663         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1664                 GOTO(unlock, rc = 0);
1665
1666         /* If the dir layout doesn't match, check whether the layout version
1667          * has increased, which means the layout has changed; this happens in
1668          * dir split/merge and lfsck.
1669          *
1670          * A foreign LMV should not change.
1671          */
1672         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1673             lsm->lsm_md_layout_version <=
1674             lli->lli_lsm_md->lsm_md_layout_version) {
1675                 CERROR("%s: "DFID" dir layout mismatch:\n",
1676                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1677                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1678                 lsm_md_dump(D_ERROR, lsm);
1679                 GOTO(unlock, rc = -EINVAL);
1680         }
1681
1682         up_read(&lli->lli_lsm_sem);
1683         down_write(&lli->lli_lsm_sem);
1684         /* clear existing lsm */
1685         if (lli->lli_lsm_md) {
1686                 lmv_free_memmd(lli->lli_lsm_md);
1687                 lli->lli_lsm_md = NULL;
1688         }
1689
1690         rc = ll_init_lsm_md(inode, md);
1691         up_write(&lli->lli_lsm_sem);
1692
1693         if (rc)
1694                 RETURN(rc);
1695
1696         /* set md->lmv to NULL, so the subsequent free of the lustre_md will
1697          * not free this lsm.
1698          */
1699         md->lmv = NULL;
1700
1701         /* md_merge_attr() may take a long time; since the lsm is already
1702          * set, switch to the read lock.
1703          */
1704         down_read(&lli->lli_lsm_sem);
1705
1706         if (!lmv_dir_striped(lli->lli_lsm_md))
1707                 GOTO(unlock, rc = 0);
1708
1709         OBD_ALLOC_PTR(attr);
1710         if (!attr)
1711                 GOTO(unlock, rc = -ENOMEM);
1712
1713         /* validate the lsm */
1714         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1715                            ll_md_blocking_ast);
1716         if (!rc) {
1717                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1718                         md->body->mbo_nlink = attr->cat_nlink;
1719                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1720                         md->body->mbo_size = attr->cat_size;
1721                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1722                         md->body->mbo_atime = attr->cat_atime;
1723                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1724                         md->body->mbo_ctime = attr->cat_ctime;
1725                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1726                         md->body->mbo_mtime = attr->cat_mtime;
1727         }
1728
1729         OBD_FREE_PTR(attr);
1730         GOTO(unlock, rc);
1731 unlock:
1732         up_read(&lli->lli_lsm_sem);
1733
1734         return rc;
1735 }
1736
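/* Per-inode cleanup on eviction: close cached MDS open handles, free the
 * symlink name, drop the xattr cache and ACLs, clear the directory stripe
 * layout and finish the cl_object before the inode is destroyed.
 */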
1737 void ll_clear_inode(struct inode *inode)
1738 {
1739         struct ll_inode_info *lli = ll_i2info(inode);
1740         struct ll_sb_info *sbi = ll_i2sbi(inode);
1741
1742         ENTRY;
1743
1744         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1745                PFID(ll_inode2fid(inode)), inode);
1746
1747         if (S_ISDIR(inode->i_mode)) {
1748                 /* these should have been cleared in ll_file_release */
1749                 LASSERT(lli->lli_opendir_key == NULL);
1750                 LASSERT(lli->lli_sai == NULL);
1751                 LASSERT(lli->lli_opendir_pid == 0);
1752         } else {
1753                 pcc_inode_free(inode);
1754         }
1755
1756         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1757
1758         LASSERT(!lli->lli_open_fd_write_count);
1759         LASSERT(!lli->lli_open_fd_read_count);
1760         LASSERT(!lli->lli_open_fd_exec_count);
1761
1762         if (lli->lli_mds_write_och)
1763                 ll_md_real_close(inode, FMODE_WRITE);
1764         if (lli->lli_mds_exec_och)
1765                 ll_md_real_close(inode, FMODE_EXEC);
1766         if (lli->lli_mds_read_och)
1767                 ll_md_real_close(inode, FMODE_READ);
1768
1769         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1770                 OBD_FREE(lli->lli_symlink_name,
1771                          strlen(lli->lli_symlink_name) + 1);
1772                 lli->lli_symlink_name = NULL;
1773         }
1774
1775         ll_xattr_cache_destroy(inode);
1776
1777         forget_all_cached_acls(inode);
1778         lli_clear_acl(lli);
1779         lli->lli_inode_magic = LLI_INODE_DEAD;
1780
1781         if (S_ISDIR(inode->i_mode))
1782                 ll_dir_clear_lsm_md(inode);
1783         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1784                 LASSERT(list_empty(&lli->lli_agl_list));
1785
1786         /*
1787          * XXX This has to be done before lsm is freed below, because
1788          * cl_object still uses inode lsm.
1789          */
1790         cl_inode_fini(inode);
1791
1792         llcrypt_put_encryption_info(inode);
1793
1794         EXIT;
1795 }
1796
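/* Send the attribute update to the MDT and, on success, apply both the
 * requested changes and the attributes returned in the reply to the local
 * inode. A chgrp of a regular file also passes the current block count so
 * the MDT can reserve quota for the whole file.
 */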
1797 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1798 {
1799         struct lustre_md md;
1800         struct inode *inode = dentry->d_inode;
1801         struct ll_sb_info *sbi = ll_i2sbi(inode);
1802         struct ptlrpc_request *request = NULL;
1803         int rc, ia_valid;
1804
1805         ENTRY;
1806
1807         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1808                                      LUSTRE_OPC_ANY, NULL);
1809         if (IS_ERR(op_data))
1810                 RETURN(PTR_ERR(op_data));
1811
1812         /* If this is a chgrp of a regular file, we want to reserve enough
1813          * quota to cover the entire file size.
1814          */
1815         if (S_ISREG(inode->i_mode) && op_data->op_attr.ia_valid & ATTR_GID &&
1816             from_kgid(&init_user_ns, op_data->op_attr.ia_gid) !=
1817             from_kgid(&init_user_ns, inode->i_gid)) {
1818                 op_data->op_xvalid |= OP_XVALID_BLOCKS;
1819                 op_data->op_attr_blocks = inode->i_blocks;
1820         }
1821
1822
1823         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1824         if (rc) {
1825                 ptlrpc_req_finished(request);
1826                 if (rc == -ENOENT) {
1827                         clear_nlink(inode);
1828                         /* Unlinked special device node? Or just a race?
1829                          * Pretend we did everything. */
1830                         if (!S_ISREG(inode->i_mode) &&
1831                             !S_ISDIR(inode->i_mode)) {
1832                                 ia_valid = op_data->op_attr.ia_valid;
1833                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1834                                 rc = simple_setattr(dentry, &op_data->op_attr);
1835                                 op_data->op_attr.ia_valid = ia_valid;
1836                         }
1837                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1838                         CERROR("md_setattr fails: rc = %d\n", rc);
1839                 }
1840                 RETURN(rc);
1841         }
1842
1843         rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp,
1844                               sbi->ll_md_exp, &md);
1845         if (rc) {
1846                 ptlrpc_req_finished(request);
1847                 RETURN(rc);
1848         }
1849
1850         ia_valid = op_data->op_attr.ia_valid;
1851         /* The inode size will be set in ll_setattr_ost; we can't do it now
1852          * since the dirty cache is not cleared yet. */
1853         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1854         if (S_ISREG(inode->i_mode))
1855                 inode_lock(inode);
1856         rc = simple_setattr(dentry, &op_data->op_attr);
1857         if (S_ISREG(inode->i_mode))
1858                 inode_unlock(inode);
1859         op_data->op_attr.ia_valid = ia_valid;
1860
1861         rc = ll_update_inode(inode, &md);
1862         ptlrpc_req_finished(request);
1863
1864         RETURN(rc);
1865 }
1866
1867 /**
1868  * Zero portion of page that is part of @inode.
1869  * This implies, if necessary:
1870  * - taking cl_lock on range corresponding to concerned page
1871  * - grabbing vm page
1872  * - associating cl_page
1873  * - proceeding to clio read
1874  * - zeroing range in page
1875  * - proceeding to cl_page flush
1876  * - releasing cl_lock
1877  *
1878  * \param[in] inode     inode
1879  * \param[in] index     page index
1880  * \param[in] offset    offset in page to start zero from
1881  * \param[in] len       len to zero
1882  *
1883  * \retval 0            on success
1884  * \retval negative     errno on failure
1885  */
1886 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1887                     unsigned len)
1888 {
1889         struct ll_inode_info *lli = ll_i2info(inode);
1890         struct cl_object *clob = lli->lli_clob;
1891         __u16 refcheck;
1892         struct lu_env *env = NULL;
1893         struct cl_io *io = NULL;
1894         struct cl_page *clpage = NULL;
1895         struct page *vmpage = NULL;
1896         unsigned from = index << PAGE_SHIFT;
1897         struct cl_lock *lock = NULL;
1898         struct cl_lock_descr *descr = NULL;
1899         struct cl_2queue *queue = NULL;
1900         struct cl_sync_io *anchor = NULL;
1901         bool holdinglock = false;
1902         bool lockedbymyself = true;
1903         int rc;
1904
1905         ENTRY;
1906
1907         env = cl_env_get(&refcheck);
1908         if (IS_ERR(env))
1909                 RETURN(PTR_ERR(env));
1910
1911         io = vvp_env_thread_io(env);
1912         io->ci_obj = clob;
1913         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1914         if (rc)
1915                 GOTO(putenv, rc);
1916
1917         lock = vvp_env_lock(env);
1918         descr = &lock->cll_descr;
1919         descr->cld_obj   = io->ci_obj;
1920         descr->cld_start = cl_index(io->ci_obj, from);
1921         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1922         descr->cld_mode  = CLM_WRITE;
1923         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1924
1925         /* request lock for page */
1926         rc = cl_lock_request(env, io, lock);
1927         /* -ECANCELED indicates a matching lock with a different extent
1928          * was already present, and -EEXIST indicates a matching lock
1929          * on exactly the same extent was already present.
1930          * In both cases it means we are covered.
1931          */
1932         if (rc == -ECANCELED || rc == -EEXIST)
1933                 rc = 0;
1934         else if (rc < 0)
1935                 GOTO(iofini, rc);
1936         else
1937                 holdinglock = true;
1938
1939         /* grab page */
1940         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1941         if (vmpage == NULL)
1942                 GOTO(rellock, rc = -EOPNOTSUPP);
1943
1944         if (!PageDirty(vmpage)) {
1945                 /* associate cl_page */
1946                 clpage = cl_page_find(env, clob, vmpage->index,
1947                                       vmpage, CPT_CACHEABLE);
1948                 if (IS_ERR(clpage))
1949                         GOTO(pagefini, rc = PTR_ERR(clpage));
1950
1951                 cl_page_assume(env, io, clpage);
1952         }
1953
1954         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1955             !PageWriteback(vmpage)) {
1956                 /* read page */
1957                 /* set PagePrivate2 to detect special case of empty page
1958                  * in osc_brw_fini_request()
1959                  */
1960                 SetPagePrivate2(vmpage);
1961                 rc = ll_io_read_page(env, io, clpage, NULL);
1962                 if (!PagePrivate2(vmpage))
1963                         /* PagePrivate2 was cleared in osc_brw_fini_request()
1964                          * meaning we read an empty page. In this case, in order
1965                          * to avoid allocating unnecessary block in truncated
1966                          * file, we must not zero and write as below. Subsequent
1967                          * server-side truncate will handle things correctly.
1968                          */
1969                         GOTO(clpfini, rc = 0);
1970                 ClearPagePrivate2(vmpage);
1971                 if (rc)
1972                         GOTO(clpfini, rc);
1973                 lockedbymyself = trylock_page(vmpage);
1974                 cl_page_assume(env, io, clpage);
1975         }
1976
1977         /* zero range in page */
1978         zero_user(vmpage, offset, len);
1979
1980         if (holdinglock && clpage) {
1981                 /* explicitly write newly modified page */
1982                 queue = &io->ci_queue;
1983                 cl_2queue_init(queue);
1984                 anchor = &vvp_env_info(env)->vti_anchor;
1985                 cl_sync_io_init(anchor, 1);
1986                 clpage->cp_sync_io = anchor;
1987                 cl_2queue_add(queue, clpage, true);
1988                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1989                 if (rc)
1990                         GOTO(queuefini1, rc);
1991                 rc = cl_sync_io_wait(env, anchor, 0);
1992                 if (rc)
1993                         GOTO(queuefini2, rc);
1994                 cl_page_assume(env, io, clpage);
1995
1996 queuefini2:
1997                 cl_2queue_discard(env, io, queue);
1998 queuefini1:
1999                 cl_2queue_disown(env, io, queue);
2000                 cl_2queue_fini(env, queue);
2001         }
2002
2003 clpfini:
2004         if (clpage)
2005                 cl_page_put(env, clpage);
2006 pagefini:
2007         if (lockedbymyself) {
2008                 unlock_page(vmpage);
2009                 put_page(vmpage);
2010         }
2011 rellock:
2012         if (holdinglock)
2013                 cl_lock_release(env, lock);
2014 iofini:
2015         cl_io_fini(env, io);
2016 putenv:
2017         if (env)
2018                 cl_env_put(env, &refcheck);
2019
2020         RETURN(rc);
2021 }
2022
2023 /* If this inode has objects allocated to it (lsm != NULL), then the OST
2024  * object(s) determine the file size and mtime.  Otherwise, the MDS will
2025  * keep these values until such a time that objects are allocated for it.
2026  * We do the MDS operations first, as it is checking permissions for us.
2027  * We don't do the MDS RPC if there is nothing that we want to store there;
2028  * otherwise there is no harm in updating mtime/atime on the MDS if we are
2029  * going to do an RPC anyway.
2030  *
2031  * If we are doing a truncate, we will send the mtime and ctime updates
2032  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2033  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2034  * at the same time.
2035  *
2036  * In the case of HSM import, we only set the attributes on the MDS.
2037  */
2038 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2039                    enum op_xvalid xvalid, bool hsm_import)
2040 {
2041         struct inode *inode = dentry->d_inode;
2042         struct ll_inode_info *lli = ll_i2info(inode);
2043         struct md_op_data *op_data = NULL;
2044         ktime_t kstart = ktime_get();
2045         int rc = 0;
2046
2047         ENTRY;
2048
2049         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2050                "valid %x, hsm_import %d\n",
2051                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2052                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2053                hsm_import);
2054
2055         if (attr->ia_valid & ATTR_SIZE) {
2056                 /* Check new size against VFS/VM file size limit and rlimit */
2057                 rc = inode_newsize_ok(inode, attr->ia_size);
2058                 if (rc)
2059                         RETURN(rc);
2060
2061                 /* The maximum Lustre file size is variable, based on the
2062                  * OST maximum object size and number of stripes.  This
2063                  * needs another check in addition to the VFS check above. */
2064                 if (attr->ia_size > ll_file_maxbytes(inode)) {
2065                         CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
2066                                PFID(&lli->lli_fid), attr->ia_size,
2067                                ll_file_maxbytes(inode));
2068                         RETURN(-EFBIG);
2069                 }
2070
2071                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2072         }
2073
2074         /* POSIX: check before ATTR_*TIME_SET is set (from inode_change_ok) */
2075         if (attr->ia_valid & TIMES_SET_FLAGS) {
2076                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2077                     !capable(CAP_FOWNER))
2078                         RETURN(-EPERM);
2079         }
2080
2081         /* We mark all of the fields "set" so MDS/OST does not re-set them */
2082         if (!(xvalid & OP_XVALID_CTIME_SET) &&
2083              (attr->ia_valid & ATTR_CTIME)) {
2084                 attr->ia_ctime = current_time(inode);
2085                 xvalid |= OP_XVALID_CTIME_SET;
2086         }
2087         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2088             (attr->ia_valid & ATTR_ATIME)) {
2089                 attr->ia_atime = current_time(inode);
2090                 attr->ia_valid |= ATTR_ATIME_SET;
2091         }
2092         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2093             (attr->ia_valid & ATTR_MTIME)) {
2094                 attr->ia_mtime = current_time(inode);
2095                 attr->ia_valid |= ATTR_MTIME_SET;
2096         }
2097
2098         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2099                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2100                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2101                        ktime_get_real_seconds());
2102
2103         if (S_ISREG(inode->i_mode))
2104                 inode_unlock(inode);
2105
2106         /* We always do an MDS RPC, even if we're only changing the size;
2107          * only the MDS knows whether truncate() should fail with -ETXTBSY */
2108
2109         OBD_ALLOC_PTR(op_data);
2110         if (op_data == NULL)
2111                 GOTO(out, rc = -ENOMEM);
2112
2113         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2114                 /* If we are changing the file size, the file content is
2115                  * modified; flag it.
2116                  */
2117                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2118                 op_data->op_bias |= MDS_DATA_MODIFIED;
2119                 clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
2120         }
2121
2122         if (attr->ia_valid & ATTR_FILE) {
2123                 struct ll_file_data *fd = attr->ia_file->private_data;
2124
2125                 if (fd->fd_lease_och)
2126                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2127         }
2128
2129         op_data->op_attr = *attr;
2130         op_data->op_xvalid = xvalid;
2131
2132         rc = ll_md_setattr(dentry, op_data);
2133         if (rc)
2134                 GOTO(out, rc);
2135
2136         if (!S_ISREG(inode->i_mode) || hsm_import)
2137                 GOTO(out, rc = 0);
2138
2139         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2140                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2141             xvalid & OP_XVALID_CTIME_SET) {
2142                 bool cached = false;
2143
2144                 rc = pcc_inode_setattr(inode, attr, &cached);
2145                 if (cached) {
2146                         if (rc) {
2147                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2148                                        "rc = %d\n",
2149                                        ll_i2sbi(inode)->ll_fsname,
2150                                        PFID(&lli->lli_fid), rc);
2151                                 GOTO(out, rc);
2152                         }
2153                 } else {
2154                         unsigned int flags = 0;
2155
2156                         /* For truncate and utimes that send attributes to the
2157                          * OSTs, setting mtime/atime to the past is performed
2158                          * under a PW [0:EOF] extent lock (new_size:EOF for
2159                          * truncate). It may seem excessive to send mtime/atime
2160                          * updates to the OSTs when not setting times to the
2161                          * past, but it is necessary due to possible time
2162                          * de-synchronization between MDT inode and OST objects.
2163                          */
2164                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
2165                                 xvalid |= OP_XVALID_FLAGS;
2166                                 flags = LUSTRE_ENCRYPT_FL;
2167                                 /* Call to ll_io_zero_page is not necessary if
2168                                  * truncating on PAGE_SIZE boundary, because
2169                                  * whole pages will be wiped.
2170                                  * In case of Direct IO, all we need is to set
2171                                  * new size.
2172                                  */
2173                                 if (attr->ia_valid & ATTR_SIZE &&
2174                                     attr->ia_size & ~PAGE_MASK &&
2175                                     !(attr->ia_valid & ATTR_FILE &&
2176                                       attr->ia_file->f_flags & O_DIRECT)) {
2177                                         pgoff_t offset =
2178                                                 attr->ia_size & (PAGE_SIZE - 1);
2179
2180                                         rc = ll_io_zero_page(inode,
2181                                                     attr->ia_size >> PAGE_SHIFT,
2182                                                     offset, PAGE_SIZE - offset);
2183                                         if (rc)
2184                                                 GOTO(out, rc);
2185                                 }
2186                         }
2187                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2188                 }
2189         }
2190
2191         /* If the file was restored, it needs to set the dirty flag.
2192          *
2193          * We've already sent MDS_DATA_MODIFIED flag in
2194          * ll_md_setattr() for truncate. However, the MDT refuses to
2195          * set the HS_DIRTY flag on released files, so we have to set
2196          * it again if the file has been restored. Please check how
2197          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2198          *
2199          * Please notice that if the file is not released, the previous
2200          * MDS_DATA_MODIFIED has taken effect and usually
2201          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2202          * This way we can save an RPC for common open + trunc
2203          * operation. */
2204         if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
2205                 struct hsm_state_set hss = {
2206                         .hss_valid = HSS_SETMASK,
2207                         .hss_setmask = HS_DIRTY,
2208                 };
2209                 int rc2;
2210
2211                 rc2 = ll_hsm_state_set(inode, &hss);
2212                 /* Truncate and write can happen at the same time, so the
2213                  * file can be marked modified even though it was not
2214                  * restored from the released state; ll_hsm_state_set() is
2215                  * then not applicable to the file and rc2 < 0 is normal in
2216                  * this case. */
2217                 if (rc2 < 0)
2218                         CDEBUG(D_INFO, DFID " HSM set dirty failed: rc2 = %d\n",
2219                                PFID(ll_inode2fid(inode)), rc2);
2220         }
2221
2222         EXIT;
2223 out:
2224         if (op_data != NULL)
2225                 ll_finish_md_op_data(op_data);
2226
2227         if (S_ISREG(inode->i_mode)) {
2228                 inode_lock(inode);
2229                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2230                         inode_dio_wait(inode);
2231                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2232          * flag.  ll_update_inode (called from ll_md_setattr) clears
2233          * the inode flags, so there is a gap where S_NOSEC is not set.
2234                  * This can cause a writer to take the i_mutex unnecessarily,
2235                  * but this is safe to do and should be rare. */
2236                 inode_has_no_xattr(inode);
2237         }
2238
2239         if (!rc)
2240                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2241                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2242                                    ktime_us_delta(ktime_get(), kstart));
2243
2244         return rc;
2245 }
2246
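/* VFS ->setattr entry point: mirror the kernel's SUID/SGID clearing rules
 * into ia_valid (ATTR_FORCE/ATTR_KILL_*), flag owner-override for combined
 * ctime+size+mode updates, and hand off to ll_setattr_raw().
 */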
2247 int ll_setattr(struct dentry *de, struct iattr *attr)
2248 {
2249         int mode = de->d_inode->i_mode;
2250         enum op_xvalid xvalid = 0;
2251         int rc;
2252
2253         rc = llcrypt_prepare_setattr(de, attr);
2254         if (rc)
2255                 return rc;
2256
2257         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2258                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2259                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2260
2261         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2262                                (ATTR_SIZE|ATTR_MODE)) &&
2263             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2264              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2265               !(attr->ia_mode & S_ISGID))))
2266                 attr->ia_valid |= ATTR_FORCE;
2267
2268         if ((attr->ia_valid & ATTR_MODE) &&
2269             (mode & S_ISUID) &&
2270             !(attr->ia_mode & S_ISUID) &&
2271             !(attr->ia_valid & ATTR_KILL_SUID))
2272                 attr->ia_valid |= ATTR_KILL_SUID;
2273
2274         if ((attr->ia_valid & ATTR_MODE) &&
2275             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2276             !(attr->ia_mode & S_ISGID) &&
2277             !(attr->ia_valid & ATTR_KILL_SGID))
2278                 attr->ia_valid |= ATTR_KILL_SGID;
2279
2280         return ll_setattr_raw(de, attr, xvalid, false);
2281 }
2282
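/* Gather filesystem statistics: block counts are normally taken from the
 * OSTs and inode counts from the MDTs, with the reported inode numbers
 * reduced when the OSTs have fewer free objects than the MDTs have free
 * inodes. Results may be served from a cache up to ll_statfs_max_age
 * seconds old.
 */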
2283 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2284                        u32 flags)
2285 {
2286         struct obd_statfs obd_osfs = { 0 };
2287         time64_t max_age;
2288         int rc;
2289
2290         ENTRY;
2291         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2292
2293         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2294                 flags |= OBD_STATFS_NODELAY;
2295
2296         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2297         if (rc)
2298                 RETURN(rc);
2299
2300         osfs->os_type = LL_SUPER_MAGIC;
2301
2302         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2303               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2304
2305         if (osfs->os_state & OS_STATFS_SUM)
2306                 GOTO(out, rc);
2307
2308         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2309         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2310                 GOTO(out, rc = 0);
2311
2312         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2313                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2314                obd_osfs.os_files);
2315
2316         osfs->os_bsize = obd_osfs.os_bsize;
2317         osfs->os_blocks = obd_osfs.os_blocks;
2318         osfs->os_bfree = obd_osfs.os_bfree;
2319         osfs->os_bavail = obd_osfs.os_bavail;
2320
2321         /* If we have _some_ OSTs, but don't have as many free objects on the
2322          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2323          * to compensate, so that the "inodes in use" number is correct.
2324          * This should be kept in sync with lod_statfs() behaviour.
2325          */
2326         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2327                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2328                                  obd_osfs.os_ffree;
2329                 osfs->os_ffree = obd_osfs.os_ffree;
2330         }
2331
2332 out:
2333         RETURN(rc);
2334 }
2335
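/* Clamp the statfs result to the project quota limits of this inode's
 * project ID, preferring the soft limit over the hard limit. Block limits
 * are reported by the quota code in KiB and converted to f_bsize units
 * below; as an illustrative example, a 1048576 KiB soft limit with an
 * f_bsize of 4096 becomes 1048576 * 1024 / 4096 = 262144 blocks.
 */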
2336 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2337 {
2338         struct if_quotactl qctl = {
2339                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2340                 .qc_type = PRJQUOTA,
2341                 .qc_valid = QC_GENERAL,
2342         };
2343         u64 limit, curblock;
2344         int ret;
2345
2346         qctl.qc_id = ll_i2info(inode)->lli_projid;
2347         ret = quotactl_ioctl(inode->i_sb, &qctl);
2348         if (ret) {
2349                 /* ignore errors if the project ID does not have
2350                  * a quota limit or the feature is unsupported.
2351                  */
2352                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2353                         ret = 0;
2354                 return ret;
2355         }
2356
2357         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2358                  qctl.qc_dqblk.dqb_bsoftlimit :
2359                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2360         if (limit && sfs->f_blocks > limit) {
2361                 curblock = (qctl.qc_dqblk.dqb_curspace +
2362                                 sfs->f_bsize - 1) / sfs->f_bsize;
2363                 sfs->f_blocks = limit;
2364                 sfs->f_bfree = sfs->f_bavail =
2365                         (sfs->f_blocks > curblock) ?
2366                         (sfs->f_blocks - curblock) : 0;
2367         }
2368
2369         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2370                 qctl.qc_dqblk.dqb_isoftlimit :
2371                 qctl.qc_dqblk.dqb_ihardlimit;
2372         if (limit && sfs->f_files > limit) {
2373                 sfs->f_files = limit;
2374                 sfs->f_ffree = (sfs->f_files >
2375                         qctl.qc_dqblk.dqb_curinodes) ?
2376                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2377         }
2378
2379         return 0;
2380 }
2381
2382 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2383 {
2384         struct super_block *sb = de->d_sb;
2385         struct obd_statfs osfs;
2386         __u64 fsid = huge_encode_dev(sb->s_dev);
2387         ktime_t kstart = ktime_get();
2388         int rc;
2389
2390         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2391
2392         /* Some amount of caching on the client is allowed */
2393         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2394         if (rc)
2395                 return rc;
2396
2397         statfs_unpack(sfs, &osfs);
2398
2399         /* We need to downshift for all 32-bit kernels, because we can't
2400          * tell if the kernel is being called via sys_statfs64() or not.
2401          * Stop before overflowing f_bsize - in which case it is better
2402          * to just risk EOVERFLOW if caller is using old sys_statfs(). */
2403         if (sizeof(long) < 8) {
2404                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2405                         sfs->f_bsize <<= 1;
2406
2407                         osfs.os_blocks >>= 1;
2408                         osfs.os_bfree >>= 1;
2409                         osfs.os_bavail >>= 1;
2410                 }
2411         }
2412
2413         sfs->f_blocks = osfs.os_blocks;
2414         sfs->f_bfree = osfs.os_bfree;
2415         sfs->f_bavail = osfs.os_bavail;
2416         sfs->f_fsid.val[0] = (__u32)fsid;
2417         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2418         if (ll_i2info(de->d_inode)->lli_projid)
2419                 return ll_statfs_project(de->d_inode, sfs);
2420
2421         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2422                            ktime_us_delta(ktime_get(), kstart));
2423
2424         return 0;
2425 }
2426
2427 void ll_inode_size_lock(struct inode *inode)
2428 {
2429         struct ll_inode_info *lli;
2430
2431         LASSERT(!S_ISDIR(inode->i_mode));
2432
2433         lli = ll_i2info(inode);
2434         mutex_lock(&lli->lli_size_mutex);
2435 }
2436
2437 void ll_inode_size_unlock(struct inode *inode)
2438 {
2439         struct ll_inode_info *lli;
2440
2441         lli = ll_i2info(inode);
2442         mutex_unlock(&lli->lli_size_mutex);
2443 }
2444
2445 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
2446 {
2447         /* do not clear encryption flag */
2448         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2449         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2450         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2451                 set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2452         else
2453                 clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2454 }
2455
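/* Apply the attributes carried in @md (from an MDT reply) to the VFS
 * inode: initialize the cl_object when a file layout is present, update
 * the directory stripe layout, ACLs, timestamps, ownership, size/blocks
 * and the HSM restore state, while never letting a cached FID change.
 */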
2456 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2457 {
2458         struct ll_inode_info *lli = ll_i2info(inode);
2459         struct mdt_body *body = md->body;
2460         struct ll_sb_info *sbi = ll_i2sbi(inode);
2461         int rc = 0;
2462
2463         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2464                 rc = cl_file_inode_init(inode, md);
2465                 if (rc)
2466                         return rc;
2467         }
2468
2469         if (S_ISDIR(inode->i_mode)) {
2470                 rc = ll_update_lsm_md(inode, md);
2471                 if (rc != 0)
2472                         return rc;
2473         }
2474
2475         if (body->mbo_valid & OBD_MD_FLACL)
2476                 lli_replace_acl(lli, md);
2477
2478         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
2479                                         sbi->ll_flags & LL_SBI_32BIT_API);
2480         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2481
2482         if (body->mbo_valid & OBD_MD_FLATIME) {
2483                 if (body->mbo_atime > inode->i_atime.tv_sec)
2484                         inode->i_atime.tv_sec = body->mbo_atime;
2485                 lli->lli_atime = body->mbo_atime;
2486         }
2487
2488         if (body->mbo_valid & OBD_MD_FLMTIME) {
2489                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2490                         CDEBUG(D_INODE,
2491                                "setting ino %lu mtime from %lld to %llu\n",
2492                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2493                                body->mbo_mtime);
2494                         inode->i_mtime.tv_sec = body->mbo_mtime;
2495                 }
2496                 lli->lli_mtime = body->mbo_mtime;
2497         }
2498
2499         if (body->mbo_valid & OBD_MD_FLCTIME) {
2500                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2501                         inode->i_ctime.tv_sec = body->mbo_ctime;
2502                 lli->lli_ctime = body->mbo_ctime;
2503         }
2504
2505         if (body->mbo_valid & OBD_MD_FLBTIME)
2506                 lli->lli_btime = body->mbo_btime;
2507
2508         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2509         if (body->mbo_valid & OBD_MD_FLFLAGS)
2510                 ll_update_inode_flags(inode, body->mbo_flags);
2511         if (body->mbo_valid & OBD_MD_FLMODE)
2512                 inode->i_mode = (inode->i_mode & S_IFMT) |
2513                                 (body->mbo_mode & ~S_IFMT);
2514
2515         if (body->mbo_valid & OBD_MD_FLTYPE)
2516                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2517                                 (body->mbo_mode & S_IFMT);
2518
2519         LASSERT(inode->i_mode != 0);
2520         if (body->mbo_valid & OBD_MD_FLUID)
2521                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2522         if (body->mbo_valid & OBD_MD_FLGID)
2523                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2524         if (body->mbo_valid & OBD_MD_FLPROJID)
2525                 lli->lli_projid = body->mbo_projid;
2526         if (body->mbo_valid & OBD_MD_FLNLINK)
2527                 set_nlink(inode, body->mbo_nlink);
2528         if (body->mbo_valid & OBD_MD_FLRDEV)
2529                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2530
2531         if (body->mbo_valid & OBD_MD_FLID) {
2532                 /* FID shouldn't be changed! */
2533                 if (fid_is_sane(&lli->lli_fid)) {
2534                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2535                                  "Trying to change FID "DFID
2536                                  " to the "DFID", inode "DFID"(%p)\n",
2537                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2538                                  PFID(ll_inode2fid(inode)), inode);
2539                 } else {
2540                         lli->lli_fid = body->mbo_fid1;
2541                 }
2542         }
2543
2544         LASSERT(fid_seq(&lli->lli_fid) != 0);
2545
2546         lli->lli_attr_valid = body->mbo_valid;
2547         if (body->mbo_valid & OBD_MD_FLSIZE) {
2548                 i_size_write(inode, body->mbo_size);
2549
2550                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2551                        PFID(ll_inode2fid(inode)),
2552                        (unsigned long long)body->mbo_size);
2553
2554                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2555                         inode->i_blocks = body->mbo_blocks;
2556         } else {
2557                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2558                         lli->lli_lazysize = body->mbo_size;
2559                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2560                         lli->lli_lazyblocks = body->mbo_blocks;
2561         }
2562
2563         if (body->mbo_valid & OBD_MD_TSTATE) {
2564                 /* Set LLIF_FILE_RESTORING while a restore is ongoing and
2565                  * clear it when done, so that glimpsing of the updated
2566                  * attrs starts again
2567                  */
2568                 if (body->mbo_t_state & MS_RESTORE)
2569                         set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2570                 else
2571                         clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2572         }
2573
2574         return 0;
2575 }
2576
2577 /* update directory depth to ROOT, called after LOOKUP lock is fetched. */
2578 void ll_update_dir_depth(struct inode *dir, struct inode *inode)
2579 {
2580         struct ll_inode_info *lli;
2581
2582         if (!S_ISDIR(inode->i_mode))
2583                 return;
2584
2585         if (inode == dir)
2586                 return;
2587
2588         lli = ll_i2info(inode);
2589         lli->lli_depth = ll_i2info(dir)->lli_depth + 1;
2590         CDEBUG(D_INODE, DFID" depth %hu\n", PFID(&lli->lli_fid), lli->lli_depth);
2591 }
2592
2593 void ll_truncate_inode_pages_final(struct inode *inode)
2594 {
2595         struct address_space *mapping = &inode->i_data;
2596         unsigned long nrpages;
2597         unsigned long flags;
2598
2599         truncate_inode_pages_final(mapping);
2600
2601         /* Workaround for LU-118: Note nrpages may not be totally updated when
2602          * truncate_inode_pages() returns, as there can be a page in the process
2603          * of deletion (inside __delete_from_page_cache()) in the specified
2604          * range. Thus mapping->nrpages can be non-zero when this function
2605          * returns even after truncation of the whole mapping.  Only do this if
2606          * nrpages isn't already zero.
2607          */
2608         nrpages = mapping->nrpages;
2609         if (nrpages) {
2610                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2611                 nrpages = mapping->nrpages;
2612                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2613         } /* Workaround end */
2614
2615         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2616                  "see https://jira.whamcloud.com/browse/LU-118\n",
2617                  ll_i2sbi(inode)->ll_fsname,
2618                  PFID(ll_inode2fid(inode)), inode, nrpages);
2619 }
2620
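/* Initialize a newly allocated inode from the lustre_md passed in @opaque
 * (invoked when a new inode is instantiated, see ll_iget()): zero the
 * in-core timestamps, pull the attributes from the MDT reply, then install
 * file, dir, symlink or special inode operations according to the mode.
 */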
2621 int ll_read_inode2(struct inode *inode, void *opaque)
2622 {
2623         struct lustre_md *md = opaque;
2624         struct ll_inode_info *lli = ll_i2info(inode);
2625         int     rc;
2626         ENTRY;
2627
2628         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2629                PFID(&lli->lli_fid), inode);
2630
2631         /* Core attributes from the MDS first.  This is a new inode, and
2632          * the VFS doesn't zero times in the core inode so we have to do
2633          * it ourselves.  They will be overwritten by either MDS or OST
2634          * attributes - we just need to make sure they aren't newer.
2635          */
2636         inode->i_mtime.tv_sec = 0;
2637         inode->i_atime.tv_sec = 0;
2638         inode->i_ctime.tv_sec = 0;
2639         inode->i_rdev = 0;
2640         rc = ll_update_inode(inode, md);
2641         if (rc != 0)
2642                 RETURN(rc);
2643
2644         /* OIDEBUG(inode); */
2645
2646 #ifdef HAVE_BACKING_DEV_INFO
2647         /* initializing backing dev info. */
2648         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2649 #endif
2650         if (S_ISREG(inode->i_mode)) {
2651                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2652                 inode->i_op = &ll_file_inode_operations;
2653                 inode->i_fop = sbi->ll_fop;
2654                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2655                 EXIT;
2656         } else if (S_ISDIR(inode->i_mode)) {
2657                 inode->i_op = &ll_dir_inode_operations;
2658                 inode->i_fop = &ll_dir_operations;
2659                 EXIT;
2660         } else if (S_ISLNK(inode->i_mode)) {
2661                 inode->i_op = &ll_fast_symlink_inode_operations;
2662                 EXIT;
2663         } else {
2664                 inode->i_op = &ll_special_inode_operations;
2665
2666                 init_special_inode(inode, inode->i_mode,
2667                                    inode->i_rdev);
2668
2669                 EXIT;
2670         }
2671
2672         return 0;
2673 }
2674
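/* Final inode teardown: write out any remaining dirty pages (or discard
 * them if the file was unlinked), truncate the page cache and clear the
 * inode.
 */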
2675 void ll_delete_inode(struct inode *inode)
2676 {
2677         struct ll_inode_info *lli = ll_i2info(inode);
2678         ENTRY;
2679
2680         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2681                 /* This is the last chance to write out dirty pages,
2682                  * otherwise we may lose data during umount.
2683                  *
2684                  * If i_nlink is 0 then just discard the data. This is safe
2685                  * because the local inode only gets i_nlink 0 from the server
2686                  * for the last unlink, so the file is not open anywhere else.
2687                  */
2688                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2689                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2690         }
2691
2692         ll_truncate_inode_pages_final(inode);
2693         ll_clear_inode(inode);
2694         clear_inode(inode);
2695
2696         EXIT;
2697 }
2698
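/*
 * ll_iocontrol() implements the generic FS_IOC_GETFLAGS/FS_IOC_SETFLAGS
 * inode-flag ioctls on top of MDS getattr/setattr RPCs.  As an illustration
 * only (a userspace sketch, not part of this module; FS_NOATIME_FL is just
 * an example bit), given an open file descriptor fd the flags are read and
 * modified with the standard chattr-style interface from <linux/fs.h>:
 *
 *	int flags;
 *
 *	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
 *		flags |= FS_NOATIME_FL;
 *		ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *	}
 *
 * The new flags are sent to the MDT via md_setattr() and, for regular files
 * with objects, also applied to the OSTs through cl_setattr_ost().
 */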
2699 int ll_iocontrol(struct inode *inode, struct file *file,
2700                  unsigned int cmd, unsigned long arg)
2701 {
2702         struct ll_sb_info *sbi = ll_i2sbi(inode);
2703         struct ptlrpc_request *req = NULL;
2704         int rc, flags = 0;
2705         ENTRY;
2706
2707         switch (cmd) {
2708         case FS_IOC_GETFLAGS: {
2709                 struct mdt_body *body;
2710                 struct md_op_data *op_data;
2711
2712                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2713                                              0, 0, LUSTRE_OPC_ANY,
2714                                              NULL);
2715                 if (IS_ERR(op_data))
2716                         RETURN(PTR_ERR(op_data));
2717
2718                 op_data->op_valid = OBD_MD_FLFLAGS;
2719                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2720                 ll_finish_md_op_data(op_data);
2721                 if (rc) {
2722                         CERROR("%s: failure inode "DFID": rc = %d\n",
2723                                sbi->ll_md_exp->exp_obd->obd_name,
2724                                PFID(ll_inode2fid(inode)), rc);
2725                         RETURN(-abs(rc));
2726                 }
2727
2728                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2729
2730                 flags = body->mbo_flags;
2731
2732                 ptlrpc_req_finished(req);
2733
2734                 RETURN(put_user(flags, (int __user *)arg));
2735         }
2736         case FS_IOC_SETFLAGS: {
2737                 struct iattr *attr;
2738                 struct md_op_data *op_data;
2739                 struct cl_object *obj;
2740                 struct fsxattr fa = { 0 };
2741
2742                 if (get_user(flags, (int __user *)arg))
2743                         RETURN(-EFAULT);
2744
2745                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2746                 if (flags & LUSTRE_PROJINHERIT_FL)
2747                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2748
2749                 rc = ll_ioctl_check_project(inode, fa.fsx_xflags,
2750                                             fa.fsx_projid);
2751                 if (rc)
2752                         RETURN(rc);
2753
2754                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2755                                              LUSTRE_OPC_ANY, NULL);
2756                 if (IS_ERR(op_data))
2757                         RETURN(PTR_ERR(op_data));
2758
2759                 op_data->op_attr_flags = flags;
2760                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2761                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2762                 ll_finish_md_op_data(op_data);
2763                 ptlrpc_req_finished(req);
2764                 if (rc)
2765                         RETURN(rc);
2766
2767                 ll_update_inode_flags(inode, flags);
2768
2769                 obj = ll_i2info(inode)->lli_clob;
2770                 if (obj == NULL)
2771                         RETURN(0);
2772
2773                 OBD_ALLOC_PTR(attr);
2774                 if (attr == NULL)
2775                         RETURN(-ENOMEM);
2776
2777                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2778
2779                 OBD_FREE_PTR(attr);
2780                 RETURN(rc);
2781         }
2782         default:
2783                 RETURN(-ENOSYS);
2784         }
2785
2786         RETURN(0);
2787 }
2788
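/*
 * Request that both the MDC and the OSC/LOV exports flush the security
 * contexts of the current user (KEY_FLUSH_CTX), typically when the user's
 * credentials have changed or been destroyed.
 */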
2789 int ll_flush_ctx(struct inode *inode)
2790 {
2791         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2792
2793         CDEBUG(D_SEC, "flush context for user %d\n",
2794                from_kuid(&init_user_ns, current_uid()));
2795
2796         obd_set_info_async(NULL, sbi->ll_md_exp,
2797                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2798                            0, NULL, NULL);
2799         obd_set_info_async(NULL, sbi->ll_dt_exp,
2800                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2801                            0, NULL, NULL);
2802         return 0;
2803 }
2804
2805 /* umount -f on the client means force down, don't save state */
2806 void ll_umount_begin(struct super_block *sb)
2807 {
2808         struct ll_sb_info *sbi = ll_s2sbi(sb);
2809         struct obd_device *obd;
2810         struct obd_ioctl_data *ioc_data;
2811         int cnt;
2812         ENTRY;
2813
2814         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2815                sb->s_count, atomic_read(&sb->s_active));
2816
2817         obd = class_exp2obd(sbi->ll_md_exp);
2818         if (obd == NULL) {
2819                 CERROR("Invalid MDC connection handle %#llx\n",
2820                        sbi->ll_md_exp->exp_handle.h_cookie);
2821                 EXIT;
2822                 return;
2823         }
2824         obd->obd_force = 1;
2825
2826         obd = class_exp2obd(sbi->ll_dt_exp);
2827         if (obd == NULL) {
2828                 CERROR("Invalid LOV connection handle %#llx\n",
2829                        sbi->ll_dt_exp->exp_handle.h_cookie);
2830                 EXIT;
2831                 return;
2832         }
2833         obd->obd_force = 1;
2834
2835         OBD_ALLOC_PTR(ioc_data);
2836         if (ioc_data) {
2837                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2838                               sizeof(*ioc_data), ioc_data, NULL);
2839
2840                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2841                               sizeof(*ioc_data), ioc_data, NULL);
2842
2843                 OBD_FREE_PTR(ioc_data);
2844         }
2845
2846         /* Really, we'd like to wait until there are no requests outstanding,
2847          * and then continue.  For now, we just periodically check for the
2848          * VFS to decrement the mount count and hope to finish within 10 seconds.
2849          */
2850         cnt = 10;
2851         while (cnt > 0 &&
2852                !may_umount(sbi->ll_mnt.mnt)) {
2853                 ssleep(1);
2854                 cnt -= 1;
2855         }
2856
2857         EXIT;
2858 }
2859
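/*
 * Only a change of the read-only flag is handled on remount; the new state
 * is pushed to the MDC export via KEY_READ_ONLY.  For example (illustrative
 * shell usage only, the mount point is a placeholder):
 *
 *	mount -t lustre -o remount,ro /mnt/lustre
 */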
2860 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2861 {
2862         struct ll_sb_info *sbi = ll_s2sbi(sb);
2863         char *profilenm = get_profile_name(sb);
2864         int err;
2865         __u32 read_only;
2866
2867         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2868                 read_only = *flags & MS_RDONLY;
2869                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2870                                          sizeof(KEY_READ_ONLY),
2871                                          KEY_READ_ONLY, sizeof(read_only),
2872                                          &read_only, NULL);
2873                 if (err) {
2874                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2875                                       profilenm, read_only ?
2876                                       "read-only" : "read-write", err);
2877                         return err;
2878                 }
2879
2880                 if (read_only)
2881                         sb->s_flags |= SB_RDONLY;
2882                 else
2883                         sb->s_flags &= ~SB_RDONLY;
2884
2885                 if (sbi->ll_flags & LL_SBI_VERBOSE)
2886                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2887                                       read_only ?  "read-only" : "read-write");
2888         }
2889         return 0;
2890 }
2891
2892 /**
2893  * Clean up the open handle that is cached on the MDT side.
2894  *
2895  * In the open case, the client-side open handling thread may hit an error
2896  * after the MDT has granted the open. In that case, the client should
2897  * send a close RPC to the MDT as cleanup; otherwise, the open handle is
2898  * leaked on the MDT until the client unmounts or is evicted.
2899  *
2900  * Furthermore, if someone unlinked the file, the open handle holds a
2901  * reference on the file/object and will block subsequent threads that
2902  * want to locate that object via FID.
2903  *
2904  * \param[in] sb    super block for this file system
2905  * \param[in] pill  request capsule of the original open request
2906  */
2907 void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill)
2908 {
2909         struct mdt_body                 *body;
2910         struct md_op_data               *op_data;
2911         struct ptlrpc_request           *close_req = NULL;
2912         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2913         ENTRY;
2914
2915         body = req_capsule_server_get(pill, &RMF_MDT_BODY);
2916         OBD_ALLOC_PTR(op_data);
2917         if (op_data == NULL) {
2918                 CWARN("%s: cannot allocate op_data to release open handle for "
2919                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2920
2921                 RETURN_EXIT;
2922         }
2923
2924         op_data->op_fid1 = body->mbo_fid1;
2925         op_data->op_open_handle = body->mbo_open_handle;
2926         op_data->op_mod_time = ktime_get_real_seconds();
2927         md_close(exp, op_data, NULL, &close_req);
2928         ptlrpc_req_finished(close_req);
2929         ll_finish_md_op_data(op_data);
2930
2931         EXIT;
2932 }
2933
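/*
 * ll_prep_inode() turns an MD reply (carried in @pill) into an initialized
 * inode: it unpacks the lustre_md, updates an existing inode or instantiates
 * a new one via ll_iget(), and applies a layout piggybacked on the intent
 * lock if one was granted.  If an open intent fails, the open handle cached
 * on the MDT is released through ll_open_cleanup().
 */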
2934 int ll_prep_inode(struct inode **inode, struct req_capsule *pill,
2935                   struct super_block *sb, struct lookup_intent *it)
2936 {
2937         struct ll_sb_info *sbi = NULL;
2938         struct lustre_md md = { NULL };
2939         bool default_lmv_deleted = false;
2940         int rc;
2941
2942         ENTRY;
2943
2944         LASSERT(*inode || sb);
2945         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2946         rc = md_get_lustre_md(sbi->ll_md_exp, pill, sbi->ll_dt_exp,
2947                               sbi->ll_md_exp, &md);
2948         if (rc != 0)
2949                 GOTO(out, rc);
2950
2951         /*
2952          * Clear default_lmv only if the intent_getattr reply doesn't contain
2953          * it, but that needs to be done after iget; check it early here
2954          * because ll_update_lsm_md() may change md.
2955          */
2956         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2957             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2958                 default_lmv_deleted = true;
2959
2960         if (*inode) {
2961                 rc = ll_update_inode(*inode, &md);
2962                 if (rc != 0)
2963                         GOTO(out, rc);
2964         } else {
2965                 LASSERT(sb != NULL);
2966
2967                 /*
2968                  * At this point the server returns the same FID the client
2969                  * generated for the create, so using ->fid1 is okay here.
2970                  */
2971                 if (!fid_is_sane(&md.body->mbo_fid1)) {
2972                         CERROR("%s: Fid is insane "DFID"\n",
2973                                 sbi->ll_fsname,
2974                                 PFID(&md.body->mbo_fid1));
2975                         GOTO(out, rc = -EINVAL);
2976                 }
2977
2978                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2979                                              sbi->ll_flags & LL_SBI_32BIT_API),
2980                                  &md);
2981                 if (IS_ERR(*inode)) {
2982                         lmd_clear_acl(&md);
2983                         rc = PTR_ERR(*inode);
2984                         *inode = NULL;
2985                         CERROR("new_inode -fatal: rc %d\n", rc);
2986                         GOTO(out, rc);
2987                 }
2988         }
2989
2990         /* Handle a piggybacked layout lock.
2991          * A layout lock can be piggybacked on getattr and open requests.
2992          * The lsm can be applied to the inode only if it comes with a layout
2993          * lock, otherwise the correct layout may be overwritten, for example:
2994          * 1. proc1: the MDT returns an lsm but does not grant the layout lock
2995          * 2. the layout is changed by another client
2996          * 3. proc2: refreshes the layout and is granted the layout lock
2997          * 4. proc1: applies the now-stale layout */
2998         if (it != NULL && it->it_lock_mode != 0) {
2999                 struct lustre_handle lockh;
3000                 struct ldlm_lock *lock;
3001
3002                 lockh.cookie = it->it_lock_handle;
3003                 lock = ldlm_handle2lock(&lockh);
3004                 LASSERT(lock != NULL);
3005                 if (ldlm_has_layout(lock)) {
3006                         struct cl_object_conf conf;
3007
3008                         memset(&conf, 0, sizeof(conf));
3009                         conf.coc_opc = OBJECT_CONF_SET;
3010                         conf.coc_inode = *inode;
3011                         conf.coc_lock = lock;
3012                         conf.u.coc_layout = md.layout;
3013                         (void)ll_layout_conf(*inode, &conf);
3014                 }
3015                 LDLM_LOCK_PUT(lock);
3016         }
3017
3018         if (default_lmv_deleted)
3019                 ll_update_default_lsm_md(*inode, &md);
3020
3021         /* we may want to apply some policy for foreign file/dir */
3022         if (ll_sbi_has_foreign_symlink(sbi)) {
3023                 rc = ll_manage_foreign(*inode, &md);
3024                 if (rc < 0)
3025                         GOTO(out, rc);
3026         }
3027
3028         GOTO(out, rc = 0);
3029
3030 out:
3031         /* cleanup will be done if necessary */
3032         md_free_lustre_md(sbi->ll_md_exp, &md);
3033
3034         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
3035                 ll_intent_drop_lock(it);
3036                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, pill);
3037         }
3038
3039         return rc;
3040 }
3041
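/*
 * ll_obd_statfs() services the IOC_OBD_STATFS ioctl.  Judging by the size
 * checks below, the obd_ioctl_data buffer carries: inlbuf1 = __u32 statfs
 * type (LL_STATFS_LMV or LL_STATFS_LOV), inlbuf2 = __u32 target index
 * (interpreted by the obd layer below), pbuf1 = struct obd_statfs output
 * buffer, pbuf2 = struct obd_uuid output buffer.
 */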
3042 int ll_obd_statfs(struct inode *inode, void __user *arg)
3043 {
3044         struct ll_sb_info *sbi = NULL;
3045         struct obd_export *exp;
3046         struct obd_ioctl_data *data = NULL;
3047         __u32 type;
3048         int len = 0, rc;
3049
3050         if (inode)
3051                 sbi = ll_i2sbi(inode);
3052         if (!sbi)
3053                 GOTO(out_statfs, rc = -EINVAL);
3054
3055         rc = obd_ioctl_getdata(&data, &len, arg);
3056         if (rc)
3057                 GOTO(out_statfs, rc);
3058
3059         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3060             !data->ioc_pbuf1 || !data->ioc_pbuf2)
3061                 GOTO(out_statfs, rc = -EINVAL);
3062
3063         if (data->ioc_inllen1 != sizeof(__u32) ||
3064             data->ioc_inllen2 != sizeof(__u32) ||
3065             data->ioc_plen1 != sizeof(struct obd_statfs) ||
3066             data->ioc_plen2 != sizeof(struct obd_uuid))
3067                 GOTO(out_statfs, rc = -EINVAL);
3068
3069         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3070         if (type & LL_STATFS_LMV)
3071                 exp = sbi->ll_md_exp;
3072         else if (type & LL_STATFS_LOV)
3073                 exp = sbi->ll_dt_exp;
3074         else
3075                 GOTO(out_statfs, rc = -ENODEV);
3076
3077         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3078         if (rc)
3079                 GOTO(out_statfs, rc);
3080 out_statfs:
3081         OBD_FREE_LARGE(data, len);
3082         return rc;
3083 }
3084
3085 /*
3086  * This is normally called in ll_finish_md_op_data(), but sometimes it needs
3087  * to be called early to avoid a deadlock.
3088  */
3089 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3090 {
3091         if (op_data->op_mea2_sem) {
3092                 up_read_non_owner(op_data->op_mea2_sem);
3093                 op_data->op_mea2_sem = NULL;
3094         }
3095
3096         if (op_data->op_mea1_sem) {
3097                 up_read_non_owner(op_data->op_mea1_sem);
3098                 op_data->op_mea1_sem = NULL;
3099         }
3100 }
3101
3102 /* Prepare the md_op_data hint for passing it down to the MD stack. */
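/*
 * Typical usage mirrors the FS_IOC_GETFLAGS path above (sketch only, local
 * variables assumed):
 *
 *	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		RETURN(PTR_ERR(op_data));
 *	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
 *	ll_finish_md_op_data(op_data);
 *
 * On success the returned op_data must always be released with
 * ll_finish_md_op_data(), which also drops the LSM semaphores taken here.
 */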
3103 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3104                                       struct inode *i1, struct inode *i2,
3105                                       const char *name, size_t namelen,
3106                                       __u32 mode, enum md_op_code opc,
3107                                       void *data)
3108 {
3109         struct llcrypt_name fname = { 0 };
3110         int rc;
3111
3112         LASSERT(i1 != NULL);
3113
3114         if (name == NULL) {
3115                 /* Do not reuse namelen for something else. */
3116                 if (namelen != 0)
3117                         return ERR_PTR(-EINVAL);
3118         } else {
3119                 if (namelen > ll_i2sbi(i1)->ll_namelen)
3120                         return ERR_PTR(-ENAMETOOLONG);
3121
3122                 /* "/" is not a valid name, but it is allowed */
3123                 if (!lu_name_is_valid_2(name, namelen) &&
3124                     strncmp("/", name, namelen) != 0)
3125                         return ERR_PTR(-EINVAL);
3126         }
3127
3128         if (op_data == NULL)
3129                 OBD_ALLOC_PTR(op_data);
3130
3131         if (op_data == NULL)
3132                 return ERR_PTR(-ENOMEM);
3133
3134         ll_i2gids(op_data->op_suppgids, i1, i2);
3135         op_data->op_fid1 = *ll_inode2fid(i1);
3136
3137         if (S_ISDIR(i1->i_mode)) {
3138                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3139                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3140                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3141                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3142         }
3143
3144         if (i2) {
3145                 op_data->op_fid2 = *ll_inode2fid(i2);
3146                 if (S_ISDIR(i2->i_mode)) {
3147                         if (i2 != i1) {
3148                                 /* i2 is typically a child of i1, and MUST be
3149                                  * further from the root to avoid deadlocks.
3150                                  */
3151                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3152                                 op_data->op_mea2_sem =
3153                                                 &ll_i2info(i2)->lli_lsm_sem;
3154                         }
3155                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3156                 }
3157         } else {
3158                 fid_zero(&op_data->op_fid2);
3159         }
3160
3161         if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
3162                 op_data->op_cli_flags |= CLI_HASH64;
3163
3164         if (ll_need_32bit_api(ll_i2sbi(i1)))
3165                 op_data->op_cli_flags |= CLI_API32;
3166
3167         if (opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_CREATE) {
3168                 /* In the lookup case, ll_setup_filename() has already been
3169                  * called in ll_lookup_it(), so just take the provided name.
3170                  */
3171                 fname.disk_name.name = (unsigned char *)name;
3172                 fname.disk_name.len = namelen;
3173         } else if (name && namelen) {
3174                 struct qstr dname = QSTR_INIT(name, namelen);
3175                 struct inode *dir;
3176                 struct lu_fid *pfid = NULL;
3177                 struct lu_fid fid;
3178                 int lookup;
3179
3180                 if (!S_ISDIR(i1->i_mode) && i2 && S_ISDIR(i2->i_mode)) {
3181                         /* special case when called from ll_link() */
3182                         dir = i2;
3183                         lookup = 0;
3184                 } else {
3185                         dir = i1;
3186                         lookup = (int)(opc == LUSTRE_OPC_ANY);
3187                 }
3188                 if (opc == LUSTRE_OPC_ANY && lookup)
3189                         pfid = &fid;
3190                 rc = ll_setup_filename(dir, &dname, lookup, &fname, pfid);
3191                 if (rc) {
3192                         ll_finish_md_op_data(op_data);
3193                         return ERR_PTR(rc);
3194                 }
3195                 if (pfid && !fid_is_zero(pfid)) {
3196                         if (i2 == NULL)
3197                                 op_data->op_fid2 = fid;
3198                         op_data->op_bias = MDS_FID_OP;
3199                 }
3200                 if (fname.disk_name.name &&
3201                     fname.disk_name.name != (unsigned char *)name)
3202                         /* op_data->op_name must be freed after use */
3203                         op_data->op_flags |= MF_OPNAME_KMALLOCED;
3204         }
3205
3206         /* In fact LUSTRE_OPC_LOOKUP, LUSTRE_OPC_OPEN and LUSTRE_OPC_MIGR
3207          * are treated as LUSTRE_OPC_ANY
3208          */
3209         if (opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_OPEN ||
3210             opc == LUSTRE_OPC_MIGR)
3211                 op_data->op_code = LUSTRE_OPC_ANY;
3212         else
3213                 op_data->op_code = opc;
3214         op_data->op_name = fname.disk_name.name;
3215         op_data->op_namelen = fname.disk_name.len;
3216         op_data->op_mode = mode;
3217         op_data->op_mod_time = ktime_get_real_seconds();
3218         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3219         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3220         op_data->op_cap = current_cap();
3221         op_data->op_mds = 0;
3222         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3223              filename_is_volatile(name, namelen, &op_data->op_mds)) {
3224                 op_data->op_bias |= MDS_CREATE_VOLATILE;
3225         }
3226         op_data->op_data = data;
3227
3228         return op_data;
3229 }
3230
3231 void ll_finish_md_op_data(struct md_op_data *op_data)
3232 {
3233         ll_unlock_md_op_lsm(op_data);
3234         ll_security_release_secctx(op_data->op_file_secctx,
3235                                    op_data->op_file_secctx_size);
3236         if (op_data->op_flags & MF_OPNAME_KMALLOCED)
3237                 /* allocated via ll_setup_filename called
3238                  * from ll_prep_md_op_data
3239                  */
3240                 kfree(op_data->op_name);
3241         llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3242         OBD_FREE_PTR(op_data);
3243 }
3244
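/*
 * Emit the lustre-specific mount options for /proc/mounts.  An illustrative
 * (not exhaustive) output line might look like the following, where the
 * mount point and option set are placeholders:
 *
 *	... /mnt/lustre lustre rw,flock,user_xattr,lazystatfs,encrypt 0 0
 */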
3245 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3246 {
3247         struct ll_sb_info *sbi;
3248
3249         LASSERT(seq && dentry);
3250         sbi = ll_s2sbi(dentry->d_sb);
3251
3252         if (sbi->ll_flags & LL_SBI_NOLCK)
3253                 seq_puts(seq, ",nolock");
3254
3255         /* "flock" is the default since 2.13, but it wasn't for many years,
3256          * so it is still useful to print it to show that flock is enabled.
3257          * Also print "noflock" so it is clear when flock is disabled.
3258          */
3259         if (sbi->ll_flags & LL_SBI_FLOCK)
3260                 seq_puts(seq, ",flock");
3261         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
3262                 seq_puts(seq, ",localflock");
3263         else
3264                 seq_puts(seq, ",noflock");
3265
3266         if (sbi->ll_flags & LL_SBI_USER_XATTR)
3267                 seq_puts(seq, ",user_xattr");
3268
3269         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
3270                 seq_puts(seq, ",lazystatfs");
3271
3272         if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
3273                 seq_puts(seq, ",user_fid2path");
3274
3275         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
3276                 seq_puts(seq, ",always_ping");
3277
3278         if (ll_sbi_has_test_dummy_encryption(sbi))
3279                 seq_puts(seq, ",test_dummy_encryption");
3280
3281         if (ll_sbi_has_encrypt(sbi))
3282                 seq_puts(seq, ",encrypt");
3283         else
3284                 seq_puts(seq, ",noencrypt");
3285
3286         if (sbi->ll_flags & LL_SBI_FOREIGN_SYMLINK) {
3287                 seq_puts(seq, ",foreign_symlink=");
3288                 seq_puts(seq, sbi->ll_foreign_symlink_prefix);
3289         }
3290
3291         RETURN(0);
3292 }
3293
3294 /**
3295  * Get the obd name selected by cmd and copy it out to user space.
3296  */
3297 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3298 {
3299         struct ll_sb_info *sbi = ll_i2sbi(inode);
3300         struct obd_device *obd;
3301         ENTRY;
3302
3303         if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
3304                 obd = class_exp2obd(sbi->ll_dt_exp);
3305         else if (cmd == OBD_IOC_GETMDNAME)
3306                 obd = class_exp2obd(sbi->ll_md_exp);
3307         else
3308                 RETURN(-EINVAL);
3309
3310         if (!obd)
3311                 RETURN(-ENOENT);
3312
3313         if (copy_to_user((void __user *)arg, obd->obd_name,
3314                          strlen(obd->obd_name) + 1))
3315                 RETURN(-EFAULT);
3316
3317         RETURN(0);
3318 }
3319
3320 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3321 {
3322         char *path = NULL;
3323
3324         struct path p;
3325
3326         p.dentry = dentry;
3327         p.mnt = current->fs->root.mnt;
3328         path_get(&p);
3329         path = d_path(&p, buf, bufsize);
3330         path_put(&p);
3331         return path;
3332 }
3333
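/*
 * Warn that a dirty page could not be written back and was discarded
 * (typically after an I/O error or client eviction), naming the file by
 * path when a dentry alias can still be found.
 */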
3334 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3335 {
3336         char *buf, *path = NULL;
3337         struct dentry *dentry = NULL;
3338         struct inode *inode = page->mapping->host;
3339
3340         /* This can be called inside a spin lock, so use GFP_ATOMIC. */
3341         buf = (char *)__get_free_page(GFP_ATOMIC);
3342         if (buf != NULL) {
3343                 dentry = d_find_alias(page->mapping->host);
3344                 if (dentry != NULL)
3345                         path = ll_d_path(dentry, buf, PAGE_SIZE);
3346         }
3347
3348         /* The below message is checked in recovery-small.sh test_24b */
3349         CDEBUG(D_WARNING,
3350                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3351                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3352                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3353                PFID(ll_inode2fid(inode)),
3354                (path && !IS_ERR(path)) ? path : "", ioret);
3355
3356         if (dentry != NULL)
3357                 dput(dentry);
3358
3359         if (buf != NULL)
3360                 free_page((unsigned long)buf);
3361 }
3362
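/*
 * Copy a struct lov_user_md of unknown size from user space: first copy the
 * fixed-size header to determine the full size via ll_lov_user_md_size(),
 * then copy the whole buffer.  On success the caller owns *kbuf and must
 * free it with OBD_FREE_LARGE() using the returned size.
 */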
3363 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3364                         struct lov_user_md **kbuf)
3365 {
3366         struct lov_user_md      lum;
3367         ssize_t                 lum_size;
3368         ENTRY;
3369
3370         if (copy_from_user(&lum, md, sizeof(lum)))
3371                 RETURN(-EFAULT);
3372
3373         lum_size = ll_lov_user_md_size(&lum);
3374         if (lum_size < 0)
3375                 RETURN(lum_size);
3376
3377         OBD_ALLOC_LARGE(*kbuf, lum_size);
3378         if (*kbuf == NULL)
3379                 RETURN(-ENOMEM);
3380
3381         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3382                 OBD_FREE_LARGE(*kbuf, lum_size);
3383                 RETURN(-EFAULT);
3384         }
3385
3386         RETURN(lum_size);
3387 }
3388
3389 /*
3390  * Compute the llite root squash state after a change of the root squash
3391  * configuration settings or the addition/removal of an LNet NID.
3392  */
3393 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3394 {
3395         struct root_squash_info *squash = &sbi->ll_squash;
3396         int i;
3397         bool matched;
3398         struct lnet_process_id id;
3399
3400         /* Update norootsquash flag */
3401         spin_lock(&squash->rsi_lock);
3402         if (list_empty(&squash->rsi_nosquash_nids))
3403                 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3404         else {
3405                 /* Do not apply root squash if any of our NIDs is in the
3406                  * nosquash_nids list */
3407                 matched = false;
3408                 i = 0;
3409                 while (LNetGetId(i++, &id) != -ENOENT) {
3410                         if (id.nid == LNET_NID_LO_0)
3411                                 continue;
3412                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3413                                 matched = true;
3414                                 break;
3415                         }
3416                 }
3417                 if (matched)
3418                         sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
3419                 else
3420                         sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3421         }
3422         spin_unlock(&squash->rsi_lock);
3423 }
3424
3425 /**
3426  * Parse linkea content to extract information about a given hardlink
3427  *
3428  * \param[in]   ldata      - Initialized linkea data
3429  * \param[in]   linkno     - Link identifier
3430  * \param[out]  parent_fid - The entry's parent FID
3431  * \param[out]  ln         - Entry name destination buffer
3432  *
3433  * \retval 0 on success
3434  * \retval Appropriate negative error code on failure
3435  */
3436 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3437                             struct lu_fid *parent_fid, struct lu_name *ln)
3438 {
3439         unsigned int    idx;
3440         int             rc;
3441         ENTRY;
3442
3443         rc = linkea_init_with_rec(ldata);
3444         if (rc < 0)
3445                 RETURN(rc);
3446
3447         if (linkno >= ldata->ld_leh->leh_reccount)
3448                 /* beyond last link */
3449                 RETURN(-ENODATA);
3450
3451         linkea_first_entry(ldata);
3452         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3453                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3454                                     parent_fid);
3455                 if (idx == linkno)
3456                         break;
3457
3458                 linkea_next_entry(ldata);
3459         }
3460
3461         if (idx < linkno)
3462                 RETURN(-ENODATA);
3463
3464         RETURN(0);
3465 }
3466
3467 /**
3468  * Get parent FID and name of an identified link. Operation is performed for
3469  * a given link number, letting the caller iterate over linkno to list one or
3470  * all links of an entry.
3471  *
3472  * \param[in]     file - File descriptor against which to perform the operation
3473  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3474  *                       on and the available size. It is eventually filled with
3475  *                       the requested information or left untouched on error
3476  *
3477  * \retval - 0 on success
3478  * \retval - Appropriate negative error code on failure
3479  */
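/*
 * Illustrative userspace sketch (assumes the LL_IOC_GETPARENT ioctl and the
 * struct getparent layout from lustre_user.h; buffer sizes and names are
 * placeholders):
 *
 *	size_t len = sizeof(struct getparent) + NAME_MAX + 1;
 *	struct getparent *gp = calloc(1, len);
 *
 *	gp->gp_linkno = 0;
 *	gp->gp_name_size = NAME_MAX + 1;
 *	if (ioctl(fd, LL_IOC_GETPARENT, gp) == 0)
 *		printf("parent name: %s\n", gp->gp_name);
 */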
3480 int ll_getparent(struct file *file, struct getparent __user *arg)
3481 {
3482         struct inode            *inode = file_inode(file);
3483         struct linkea_data      *ldata;
3484         struct lu_buf            buf = LU_BUF_NULL;
3485         struct lu_name           ln;
3486         struct lu_fid            parent_fid;
3487         __u32                    linkno;
3488         __u32                    name_size;
3489         int                      rc;
3490
3491         ENTRY;
3492
3493         if (!capable(CAP_DAC_READ_SEARCH) &&
3494             !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
3495                 RETURN(-EPERM);
3496
3497         if (get_user(name_size, &arg->gp_name_size))
3498                 RETURN(-EFAULT);
3499
3500         if (get_user(linkno, &arg->gp_linkno))
3501                 RETURN(-EFAULT);
3502
3503         if (name_size > PATH_MAX)
3504                 RETURN(-EINVAL);
3505
3506         OBD_ALLOC(ldata, sizeof(*ldata));
3507         if (ldata == NULL)
3508                 RETURN(-ENOMEM);
3509
3510         rc = linkea_data_new(ldata, &buf);
3511         if (rc < 0)
3512                 GOTO(ldata_free, rc);
3513
3514         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3515                            buf.lb_len, OBD_MD_FLXATTR);
3516         if (rc < 0)
3517                 GOTO(lb_free, rc);
3518
3519         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3520         if (rc < 0)
3521                 GOTO(lb_free, rc);
3522
3523         if (ln.ln_namelen >= name_size)
3524                 GOTO(lb_free, rc = -EOVERFLOW);
3525
3526         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3527                 GOTO(lb_free, rc = -EFAULT);
3528
3529         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3530                 GOTO(lb_free, rc = -EFAULT);
3531
3532         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3533                 GOTO(lb_free, rc = -EFAULT);
3534
3535 lb_free:
3536         lu_buf_free(&buf);
3537 ldata_free:
3538         OBD_FREE(ldata, sizeof(*ldata));
3539
3540         RETURN(rc);
3541 }