commit 59c857a21f8db4c518ac2f17af96976401fb2722
lustre/llite/llite_lib.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Light Super operations
5  *
6  *  Copyright (c) 2002-2005 Cluster File Systems, Inc.
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #define DEBUG_SUBSYSTEM S_LLITE
25
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/random.h>
29 #include <linux/version.h>
30
31 #include <lustre_lite.h>
32 #include <lustre_ha.h>
33 #include <lustre_dlm.h>
34 #include <lprocfs_status.h>
35 #include <lustre_disk.h>
36 #include <lustre_param.h>
37 #include <lustre_log.h>
38 #include <obd_cksum.h>
39 #include <lustre_cache.h>
40 #include "llite_internal.h"
41
42 cfs_mem_cache_t *ll_file_data_slab;
43
44 LIST_HEAD(ll_super_blocks);
45 spinlock_t ll_sb_lock = SPIN_LOCK_UNLOCKED;
46
47 extern struct address_space_operations ll_aops;
48 extern struct address_space_operations ll_dir_aops;
49
50 #ifndef log2
51 #define log2(n) ffz(~(n))
52 #endif
53
54
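/* Allocate and initialize the per-mount ll_sb_info: spinlocks and lists,
 * async page and readahead limits derived from num_physpages, a freshly
 * generated UUID, and linkage onto the global ll_super_blocks list. */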
55 static struct ll_sb_info *ll_init_sbi(void)
56 {
57         struct ll_sb_info *sbi = NULL;
58         class_uuid_t uuid;
59         int i;
60         ENTRY;
61
62         OBD_ALLOC(sbi, sizeof(*sbi));
63         if (!sbi)
64                 RETURN(NULL);
65
66         spin_lock_init(&sbi->ll_lock);
67         spin_lock_init(&sbi->ll_lco.lco_lock);
68         spin_lock_init(&sbi->ll_pp_extent_lock);
69         spin_lock_init(&sbi->ll_process_lock);
70         sbi->ll_rw_stats_on = 0;
71         INIT_LIST_HEAD(&sbi->ll_pglist);
72         if (num_physpages >> (20 - CFS_PAGE_SHIFT) < 512)
73                 sbi->ll_async_page_max = num_physpages / 2;
74         else
75                 sbi->ll_async_page_max = (num_physpages / 4) * 3;
76         sbi->ll_ra_info.ra_max_pages = min(num_physpages / 8,
77                                            SBI_DEFAULT_READAHEAD_MAX);
78         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
79                                            SBI_DEFAULT_READAHEAD_WHOLE_MAX;
80         sbi->ll_contention_time = SBI_DEFAULT_CONTENTION_SECONDS;
81         sbi->ll_lockless_truncate_enable = SBI_DEFAULT_LOCKLESS_TRUNCATE_ENABLE;
82         INIT_LIST_HEAD(&sbi->ll_conn_chain);
83         INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
84
85         ll_generate_random_uuid(uuid);
86         class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
87         CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
88
89         spin_lock(&ll_sb_lock);
90         list_add_tail(&sbi->ll_list, &ll_super_blocks);
91         spin_unlock(&ll_sb_lock);
92
93 #ifdef ENABLE_LLITE_CHECKSUM
94         sbi->ll_flags |= LL_SBI_CHECKSUM;
95 #endif
96
97 #ifdef HAVE_LRU_RESIZE_SUPPORT
98         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
99 #endif
100
101 #ifdef HAVE_EXPORT___IGET
102         INIT_LIST_HEAD(&sbi->ll_deathrow);
103         spin_lock_init(&sbi->ll_deathrow_lock);
104 #endif
105         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
106                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_r_hist.oh_lock);
107                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_w_hist.oh_lock);
108         }
109
110         /* metadata statahead is enabled by default */
111         sbi->ll_sa_max = LL_SA_RPC_DEF;
112
113         RETURN(sbi);
114 }
115
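/* Counterpart of ll_init_sbi(): unlink the ll_sb_info from ll_super_blocks
 * and free it. */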
116 void ll_free_sbi(struct super_block *sb)
117 {
118         struct ll_sb_info *sbi = ll_s2sbi(sb);
119         ENTRY;
120
121         if (sbi != NULL) {
122                 spin_lock(&ll_sb_lock);
123                 list_del(&sbi->ll_list);
124                 spin_unlock(&ll_sb_lock);
125                 OBD_FREE(sbi, sizeof(*sbi));
126         }
127         EXIT;
128 }
129
130 static struct dentry_operations ll_d_root_ops = {
131 #ifdef DCACHE_LUSTRE_INVALID
132         .d_compare = ll_dcompare,
133 #endif
134 };
135
136 /* Initialize the default and maximum LOV EA and cookie sizes.  This allows
137  * us to make MDS RPCs with large enough reply buffers to hold the
138  * maximum-sized (= maximum striped) EA and cookie without having to
139  * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */
140 static int ll_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
141 {
142         struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC };
143         __u32 valsize = sizeof(struct lov_desc);
144         int rc, easize, def_easize, cookiesize;
145         struct lov_desc desc;
146         __u32 stripes;
147         ENTRY;
148
149         rc = obd_get_info(dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
150                           &valsize, &desc);
151         if (rc)
152                 RETURN(rc);
153
154         stripes = min(desc.ld_tgt_count, (__u32)LOV_MAX_STRIPE_COUNT);
155         lsm.lsm_stripe_count = stripes;
156         easize = obd_size_diskmd(dt_exp, &lsm);
157
158         lsm.lsm_stripe_count = desc.ld_default_stripe_count;
159         def_easize = obd_size_diskmd(dt_exp, &lsm);
160
161         cookiesize = stripes * sizeof(struct llog_cookie);
162
163         CDEBUG(D_HA, "updating max_mdsize/max_cookiesize: %d/%d\n",
164                easize, cookiesize);
165
166         rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize);
167         RETURN(rc);
168 }
169
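/* Connect this super block to its metadata (md) and data (dt) obd devices,
 * negotiate connect flags with both, and set up the root inode plus the
 * lustre super/export operations.  Called from ll_fill_super() once the obd
 * stack named in the client profile has been set up. */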
170 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
171 {
172         struct inode *root = NULL;
173         struct ll_sb_info *sbi = ll_s2sbi(sb);
174         struct obd_device *obd;
175         struct lu_fid rootfid;
176         struct obd_capa *oc = NULL;
177         struct obd_statfs osfs;
178         struct ptlrpc_request *request = NULL;
179         struct lustre_handle dt_conn = {0, };
180         struct lustre_handle md_conn = {0, };
181         struct obd_connect_data *data = NULL;
182         struct lustre_md lmd;
183         obd_valid valid;
184         int size, err, checksum;
185         ENTRY;
186
187         obd = class_name2obd(md);
188         if (!obd) {
189                 CERROR("MD %s: not setup or attached\n", md);
190                 RETURN(-EINVAL);
191         }
192
193         OBD_ALLOC_PTR(data);
194         if (data == NULL)
195                 RETURN(-ENOMEM);
196
197         if (proc_lustre_fs_root) {
198                 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
199                                                   dt, md);
200                 if (err < 0)
201                         CERROR("could not register mount in /proc/fs/lustre\n");
202         }
203
204         /* indicate the features supported by this client */
205         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
206                                   OBD_CONNECT_JOIN     | OBD_CONNECT_ATTRFID  |
207                                   OBD_CONNECT_VERSION  | OBD_CONNECT_MDS_CAPA |
208                                   OBD_CONNECT_OSS_CAPA | OBD_CONNECT_CANCELSET|
209                                   OBD_CONNECT_FID;
210
211 #ifdef HAVE_LRU_RESIZE_SUPPORT
212         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
213                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
214 #endif
215 #ifdef CONFIG_FS_POSIX_ACL
216         data->ocd_connect_flags |= OBD_CONNECT_ACL;
217 #endif
218         data->ocd_ibits_known = MDS_INODELOCK_FULL;
219         data->ocd_version = LUSTRE_VERSION_CODE;
220
221         if (sb->s_flags & MS_RDONLY)
222                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
223         if (sbi->ll_flags & LL_SBI_USER_XATTR)
224                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
225
226 #ifdef HAVE_MS_FLOCK_LOCK
227         /* force vfs to use lustre handler for flock() calls - bug 10743 */
228         sb->s_flags |= MS_FLOCK_LOCK;
229 #endif
230         
231         if (sbi->ll_flags & LL_SBI_FLOCK)
232                 sbi->ll_fop = &ll_file_operations_flock;
233         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
234                 sbi->ll_fop = &ll_file_operations;
235         else
236                 sbi->ll_fop = &ll_file_operations_noflock;
237
238         /* real client */
239         data->ocd_connect_flags |= OBD_CONNECT_REAL;
240         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
241                 data->ocd_connect_flags &= ~OBD_CONNECT_LCL_CLIENT;
242                 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT;
243         } else {
244                 data->ocd_connect_flags &= ~OBD_CONNECT_RMT_CLIENT;
245                 data->ocd_connect_flags |= OBD_CONNECT_LCL_CLIENT;
246         }
247
248         err = obd_connect(NULL, &md_conn, obd, &sbi->ll_sb_uuid, data, NULL);
249         if (err == -EBUSY) {
250                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
251                                    "recovery, of which this client is not a "
252                                    "part. Please wait for recovery to complete,"
253                                    " abort, or time out.\n", md);
254                 GOTO(out, err);
255         } else if (err) {
256                 CERROR("cannot connect to %s: rc = %d\n", md, err);
257                 GOTO(out, err);
258         }
259         sbi->ll_md_exp = class_conn2export(&md_conn);
260
261         err = obd_fid_init(sbi->ll_md_exp);
262         if (err) {
263                 CERROR("Can't init metadata layer FID infrastructure, "
264                        "rc %d\n", err);
265                 GOTO(out_md, err);
266         }
267
268         err = obd_statfs(obd, &osfs, cfs_time_current_64() - HZ, 0);
269         if (err)
270                 GOTO(out_md_fid, err);
271
272         size = sizeof(*data);
273         err = obd_get_info(sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
274                            KEY_CONN_DATA,  &size, data);
275         if (err) {
276                 CERROR("Get connect data failed: %d\n", err);
277                 GOTO(out_md_fid, err);
278         }
279
280         LASSERT(osfs.os_bsize);
281         sb->s_blocksize = osfs.os_bsize;
282         sb->s_blocksize_bits = log2(osfs.os_bsize);
283         sb->s_magic = LL_SUPER_MAGIC;
284
285         /* for bug 11559. in $LINUX/fs/read_write.c, function do_sendfile():
286          *         retval = in_file->f_op->sendfile(...);
287          *         if (*ppos > max)
288          *                 retval = -EOVERFLOW;
289          *
290          * it will check if *ppos is greater than max. However, max equals
291          * s_maxbytes, which is a negative integer on an x86_64 box since loff_t
292          * has been defined as a signed long long integer in the linux kernel. */
293 #if BITS_PER_LONG == 64
294         sb->s_maxbytes = PAGE_CACHE_MAXBYTES >> 1;
295 #else
296         sb->s_maxbytes = PAGE_CACHE_MAXBYTES;
297 #endif
298         sbi->ll_namelen = osfs.os_namelen;
299         sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
300
301         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
302             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
303                 LCONSOLE_INFO("Disabling user_xattr feature because "
304                               "it is not supported on the server\n");
305                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
306         }
307
308         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
309 #ifdef MS_POSIXACL
310                 sb->s_flags |= MS_POSIXACL;
311 #endif
312                 sbi->ll_flags |= LL_SBI_ACL;
313         } else {
314                 LCONSOLE_INFO("client wants to enable acl, but mdt does not support it!\n");
315 #ifdef MS_POSIXACL
316                 sb->s_flags &= ~MS_POSIXACL;
317 #endif
318                 sbi->ll_flags &= ~LL_SBI_ACL;
319         }
320
321         if (data->ocd_connect_flags & OBD_CONNECT_JOIN)
322                 sbi->ll_flags |= LL_SBI_JOIN;
323
324         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
325                 if (!(data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT)) {
326                         /* sometimes a local client claims to be remote, but the
327                          * mdt will disagree when gss is not applied on the client. */
328                         LCONSOLE_INFO("client claims to be remote, but server "
329                                       "rejected, forced to be local.\n");
330                         sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
331                 }
332         } else {
333                 if (!(data->ocd_connect_flags & OBD_CONNECT_LCL_CLIENT)) {
334                         /* with gss applied, a remote client can not claim to be
335                          * local, so the mdt may force the client to be remote. */
336                         LCONSOLE_INFO("client claims to be local, but server "
337                                       "rejected, forced to be remote.\n");
338                         sbi->ll_flags |= LL_SBI_RMT_CLIENT;
339                 }
340         }
341
342         if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
343                 LCONSOLE_INFO("client enabled MDS capability!\n");
344                 sbi->ll_flags |= LL_SBI_MDS_CAPA;
345         }
346
347         if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
348                 LCONSOLE_INFO("client enabled OSS capability!\n");
349                 sbi->ll_flags |= LL_SBI_OSS_CAPA;
350         }
351
352         sbi->ll_sdev_orig = sb->s_dev;
353 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
354         /* We set sb->s_dev equal on all lustre clients in order to support
355          * NFS export clustering.  NFSD requires that the FSID be the same
356          * on all clients. */
357         /* s_dev is also used in lt_compare() to compare two fs, but that is
358          * only a node-local comparison. */
359
360         /* XXX: this will not work with LMV */
361         sb->s_dev = get_uuid2int(sbi2mdc(sbi)->cl_target_uuid.uuid,
362                                  strlen(sbi2mdc(sbi)->cl_target_uuid.uuid));
363 #endif
364
365         obd = class_name2obd(dt);
366         if (!obd) {
367                 CERROR("DT %s: not setup or attached\n", dt);
368                 GOTO(out_md_fid, err = -ENODEV);
369         }
370
371         data->ocd_connect_flags = OBD_CONNECT_GRANT     | OBD_CONNECT_VERSION  |
372                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
373                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID      |
374                                   OBD_CONNECT_SRVLOCK   | OBD_CONNECT_TRUNCLOCK;
375         if (sbi->ll_flags & LL_SBI_OSS_CAPA)
376                 data->ocd_connect_flags |= OBD_CONNECT_OSS_CAPA;
377
378         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
379                 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
380                  * disabled by default, because it can still be enabled on the
381                  * fly via /proc. As a consequence, we still need to come to an
382                  * agreement on the supported algorithms at connect time */
383                 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
384
385                 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
386                         data->ocd_cksum_types = OBD_CKSUM_ADLER;
387                 else
388                         /* send the list of supported checksum types */
389                         data->ocd_cksum_types = OBD_CKSUM_ALL;
390         }
391
392 #ifdef HAVE_LRU_RESIZE_SUPPORT
393         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
394 #endif
395         CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d "
396                "ocd_grant: %d\n", data->ocd_connect_flags,
397                data->ocd_version, data->ocd_grant);
398
399         obd->obd_upcall.onu_owner = &sbi->ll_lco;
400         obd->obd_upcall.onu_upcall = ll_ocd_update;
401         data->ocd_brw_size = PTLRPC_MAX_BRW_PAGES << CFS_PAGE_SHIFT;
402
403         err = obd_connect(NULL, &dt_conn, obd, &sbi->ll_sb_uuid, data, NULL);
404         if (err == -EBUSY) {
405                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
406                                    "recovery, of which this client is not a "
407                                    "part.  Please wait for recovery to "
408                                    "complete, abort, or time out.\n", dt);
409                 GOTO(out_md_fid, err);
410         } else if (err) {
411                 CERROR("Cannot connect to %s: rc = %d\n", dt, err);
412                 GOTO(out_md_fid, err);
413         }
414
415         sbi->ll_dt_exp = class_conn2export(&dt_conn);
416
417         err = obd_fid_init(sbi->ll_dt_exp);
418         if (err) {
419                 CERROR("Can't init data layer FID infrastructure, "
420                        "rc %d\n", err);
421                 GOTO(out_dt, err);
422         }
423         
424         spin_lock(&sbi->ll_lco.lco_lock);
425         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
426         spin_unlock(&sbi->ll_lco.lco_lock);
427
428         err = obd_register_page_removal_cb(sbi->ll_dt_exp,
429                                            ll_page_removal_cb, 
430                                            ll_pin_extent_cb);
431         if (err) {
432                 CERROR("cannot register page removal callback: rc = %d\n",err);
433                 GOTO(out_dt, err);
434         }
435         err = obd_register_lock_cancel_cb(sbi->ll_dt_exp,
436                                           ll_extent_lock_cancel_cb);
437         if (err) {
438                 CERROR("cannot register lock cancel callback: rc = %d\n", err);
439                 GOTO(out_page_rm_cb, err);
440         }
441
442         err = ll_init_ea_size(sbi->ll_md_exp, sbi->ll_dt_exp);
443         if (err) {
444                 CERROR("cannot set max EA and cookie sizes: rc = %d\n", err);
445                 GOTO(out_lock_cn_cb, err);
446         }
447
448         err = obd_prep_async_page(sbi->ll_dt_exp, NULL, NULL, NULL,
449                                   0, NULL, NULL, NULL, 0, NULL);
450         if (err < 0) {
451                 LCONSOLE_ERROR_MSG(0x151, "There are no OSTs in this "
452                                    "filesystem. There must be at least one "
453                                    "active OST for a client to start.\n");
454                 GOTO(out_lock_cn_cb, err);
455         }
456
457         if (!ll_async_page_slab) {
458                 ll_async_page_slab_size =
459                         size_round(sizeof(struct ll_async_page)) + err;
460                 ll_async_page_slab = cfs_mem_cache_create("ll_async_page",
461                                                           ll_async_page_slab_size,
462                                                           0, 0);
463                 if (!ll_async_page_slab)
464                         GOTO(out_lock_cn_cb, err = -ENOMEM);
465         }
466
467         err = md_getstatus(sbi->ll_md_exp, &rootfid, &oc);
468         if (err) {
469                 CERROR("md_getstatus failed: rc = %d\n", err);
470                 GOTO(out_lock_cn_cb, err);
471         }
472         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&rootfid));
473         sbi->ll_root_fid = rootfid;
474
475         sb->s_op = &lustre_super_operations;
476         sb->s_export_op = &lustre_export_operations;
477
478         /* make root inode
479          * XXX: move this to after cbd setup? */
480         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
481         if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
482                 valid |= OBD_MD_FLRMTPERM;
483         else if (sbi->ll_flags & LL_SBI_ACL)
484                 valid |= OBD_MD_FLACL;
485
486         err = md_getattr(sbi->ll_md_exp, &rootfid, oc, valid, 0, &request);
487         if (oc)
488                 free_capa(oc);
489         if (err) {
490                 CERROR("md_getattr failed for root: rc = %d\n", err);
491                 GOTO(out_lock_cn_cb, err);
492         }
493         memset(&lmd, 0, sizeof(lmd));
494         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
495                                sbi->ll_md_exp, &lmd);
496         if (err) {
497                 CERROR("failed to understand root inode md: rc = %d\n", err);
498                 ptlrpc_req_finished(request);
499                 GOTO(out_lock_cn_cb, err);
500         }
501
502         LASSERT(fid_is_sane(&sbi->ll_root_fid));
503         root = ll_iget(sb, ll_fid_build_ino(sbi, &sbi->ll_root_fid), &lmd);
504         md_free_lustre_md(sbi->ll_md_exp, &lmd);
505         ptlrpc_req_finished(request);
506
507         if (root == NULL || is_bad_inode(root)) {
508                 if (lmd.lsm)
509                         obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
510 #ifdef CONFIG_FS_POSIX_ACL
511                 if (lmd.posix_acl) {
512                         posix_acl_release(lmd.posix_acl);
513                         lmd.posix_acl = NULL;
514                 }
515 #endif
516                 CERROR("lustre_lite: bad iget4 for root\n");
517                 GOTO(out_root, err = -EBADF);
518         }
519
520         err = ll_close_thread_start(&sbi->ll_lcq);
521         if (err) {
522                 CERROR("cannot start close thread: rc %d\n", err);
523                 GOTO(out_root, err);
524         }
525
526 #ifdef CONFIG_FS_POSIX_ACL
527         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
528                 rct_init(&sbi->ll_rct);
529                 et_init(&sbi->ll_et);
530         }
531 #endif
532
533         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
534         err = obd_set_info_async(sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
535                                  KEY_CHECKSUM, sizeof(checksum), &checksum,
536                                  NULL);
537
538         sb->s_root = d_alloc_root(root);
539         if (data != NULL)
540                 OBD_FREE(data, sizeof(*data));
            data = NULL; /* already freed; keep the cleanup path at out: from freeing it twice */
            if (sb->s_root == NULL) {
                    CERROR("Can't make root dentry\n");
                    GOTO(out_root, err = -ENOMEM);
            }
541         sb->s_root->d_op = &ll_d_root_ops;
542         RETURN(err);
543 out_root:
544         if (root)
545                 iput(root);
546 out_lock_cn_cb:
547         obd_unregister_lock_cancel_cb(sbi->ll_dt_exp,
548                                       ll_extent_lock_cancel_cb);
549 out_page_rm_cb:
550         obd_unregister_page_removal_cb(sbi->ll_dt_exp,
551                                        ll_page_removal_cb);
552         obd_fid_fini(sbi->ll_dt_exp);
553 out_dt:
554         obd_disconnect(sbi->ll_dt_exp);
555         sbi->ll_dt_exp = NULL;
556 out_md_fid:
557         obd_fid_fini(sbi->ll_md_exp);
558 out_md:
559         obd_disconnect(sbi->ll_md_exp);
560         sbi->ll_md_exp = NULL;
561 out:
562         if (data != NULL)
563                 OBD_FREE_PTR(data);
564         lprocfs_unregister_mountpoint(sbi);
565         return err;
566 }
567
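/* Query the MDC for the maximum EA (striping) size via KEY_MAX_EASIZE and
 * return it in *lmmsize, starting from the local obd_size_diskmd() value. */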
568 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
569 {
570         int size, rc;
571
572         *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
573         size = sizeof(int);
574         rc = obd_get_info(sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
575                           KEY_MAX_EASIZE, &size, lmmsize);
576         if (rc)
577                 CERROR("Get max mdsize error: rc = %d\n", rc);
578
579         RETURN(rc);
580 }
581
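/* Debug helper: log the inode's device, number, mode, reference count and
 * how many dentries are attached to it. */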
582 void ll_dump_inode(struct inode *inode)
583 {
584         struct list_head *tmp;
585         int dentry_count = 0;
586
587         LASSERT(inode != NULL);
588
589         list_for_each(tmp, &inode->i_dentry)
590                 dentry_count++;
591
592         CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
593                inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
594                inode->i_mode, atomic_read(&inode->i_count), dentry_count);
595 }
596
597 void lustre_dump_dentry(struct dentry *dentry, int recur)
598 {
599         struct list_head *tmp;
600         int subdirs = 0;
601
602         LASSERT(dentry != NULL);
603
604         list_for_each(tmp, &dentry->d_subdirs)
605                 subdirs++;
606
607         CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
608                " flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
609                dentry->d_name.len, dentry->d_name.name,
610                dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
611                dentry->d_parent, dentry->d_inode, atomic_read(&dentry->d_count),
612                dentry->d_flags, dentry->d_fsdata, subdirs);
613         if (dentry->d_inode != NULL)
614                 ll_dump_inode(dentry->d_inode);
615
616         if (recur == 0)
617                 return;
618
619         list_for_each(tmp, &dentry->d_subdirs) {
620                 struct dentry *d = list_entry(tmp, struct dentry, d_child);
621                 lustre_dump_dentry(d, recur - 1);
622         }
623 }
624
625 #ifdef HAVE_EXPORT___IGET
626 static void prune_dir_dentries(struct inode *inode)
627 {
628         struct dentry *dentry, *prev = NULL;
629
630         /* due to lustre-specific logic, a directory can have more than one
631          * dentry - a bug from the VFS point of view */
632 restart:
633         spin_lock(&dcache_lock);
634         if (!list_empty(&inode->i_dentry)) {
635                 dentry = list_entry(inode->i_dentry.prev,
636                                     struct dentry, d_alias);
637                 /* in order to prevent infinite loops we
638                  * break if previous dentry is busy */
639                 if (dentry != prev) {
640                         prev = dentry;
641                         dget_locked(dentry);
642                         spin_unlock(&dcache_lock);
643
644                         /* try to kill all child dentries */
645                         lock_dentry(dentry);
646                         shrink_dcache_parent(dentry);
647                         unlock_dentry(dentry);
648                         dput(dentry);
649
650                         /* now try to get rid of current dentry */
651                         d_prune_aliases(inode);
652                         goto restart;
653                 }
654         }
655         spin_unlock(&dcache_lock);
656 }
657
658 static void prune_deathrow_one(struct ll_inode_info *lli)
659 {
660         struct inode *inode = ll_info2i(lli);
661
662         /* first, try to drop any dentries - they hold a ref on the inode */
663         if (S_ISDIR(inode->i_mode))
664                 prune_dir_dentries(inode);
665         else
666                 d_prune_aliases(inode);
667
668
669         /* if somebody still uses it, leave it */
670         LASSERT(atomic_read(&inode->i_count) > 0);
671         if (atomic_read(&inode->i_count) > 1)
672                 goto out;
673
674         CDEBUG(D_INODE, "inode %lu/%u(%d) looks a good candidate for prune\n",
675                inode->i_ino,inode->i_generation, atomic_read(&inode->i_count));
676
677         /* seems nobody uses it anymore */
678         inode->i_nlink = 0;
679
680 out:
681         iput(inode);
682         return;
683 }
684
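/* Walk sbi->ll_deathrow and try to free each inode queued there.  With
 * "try" set this backs off if rescheduling is needed or the lock is
 * contended, so it can be called from less patient contexts. */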
685 static void prune_deathrow(struct ll_sb_info *sbi, int try)
686 {
687         struct ll_inode_info *lli;
688         int empty;
689
690         do {
691                 if (need_resched() && try)
692                         break;
693
694                 if (try) {
695                         if (!spin_trylock(&sbi->ll_deathrow_lock))
696                                 break;
697                 } else {
698                         spin_lock(&sbi->ll_deathrow_lock);
699                 }
700
701                 empty = 1;
702                 lli = NULL;
703                 if (!list_empty(&sbi->ll_deathrow)) {
704                         lli = list_entry(sbi->ll_deathrow.next,
705                                          struct ll_inode_info,
706                                          lli_dead_list);
707                         list_del_init(&lli->lli_dead_list);
708                         if (!list_empty(&sbi->ll_deathrow))
709                                 empty = 0;
710                 }
711                 spin_unlock(&sbi->ll_deathrow_lock);
712
713                 if (lli)
714                         prune_deathrow_one(lli);
715
716         } while (empty == 0);
717 }
718 #else /* !HAVE_EXPORT___IGET */
719 #define prune_deathrow(sbi, try) do {} while (0)
720 #endif /* HAVE_EXPORT___IGET */
721
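/* Undo client_common_fill_super(): cancel unused locks, stop the close
 * thread, drop deathrow inodes, unregister the page-removal and lock-cancel
 * callbacks, and disconnect from the dt and md exports. */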
722 void client_common_put_super(struct super_block *sb)
723 {
724         struct ll_sb_info *sbi = ll_s2sbi(sb);
725         ENTRY;
726
727 #ifdef CONFIG_FS_POSIX_ACL
728         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
729                 et_fini(&sbi->ll_et);
730                 rct_fini(&sbi->ll_rct);
731         }
732 #endif
733
734         obd_cancel_unused(sbi->ll_dt_exp, NULL, 0, NULL);
735
736         ll_close_thread_shutdown(sbi->ll_lcq);
737
738         /* destroy inodes in deathrow */
739         prune_deathrow(sbi, 0);
740
741         list_del(&sbi->ll_conn_chain);
742
743         obd_unregister_page_removal_cb(sbi->ll_dt_exp,
744                                        ll_page_removal_cb);
745         obd_unregister_lock_cancel_cb(sbi->ll_dt_exp,ll_extent_lock_cancel_cb);
746
747         obd_fid_fini(sbi->ll_dt_exp);
748         obd_disconnect(sbi->ll_dt_exp);
749         sbi->ll_dt_exp = NULL;
750
751         lprocfs_unregister_mountpoint(sbi);
752
753         obd_fid_fini(sbi->ll_md_exp);
754         obd_disconnect(sbi->ll_md_exp);
755         sbi->ll_md_exp = NULL;
756
757         EXIT;
758 }
759
760 void ll_kill_super(struct super_block *sb)
761 {
762         struct ll_sb_info *sbi;
763
764         ENTRY;
765
766         /* sb not initialized? */
767         if (!(sb->s_flags & MS_ACTIVE))
768                 return;
769
770         sbi = ll_s2sbi(sb);
771         /* we need to restore s_dev (changed for clustered NFS) before put_super,
772          * because new kernels cache s_dev and changing sb->s_dev in put_super
773          * does not affect the device actually being removed */
774         if (sbi)
775                 sb->s_dev = sbi->ll_sdev_orig;
776         EXIT;
777 }
778
779 char *ll_read_opt(const char *opt, char *data)
780 {
781         char *value;
782         char *retval;
783         ENTRY;
784
785         CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
786         if (strncmp(opt, data, strlen(opt)))
787                 RETURN(NULL);
788         if ((value = strchr(data, '=')) == NULL)
789                 RETURN(NULL);
790
791         value++;
792         OBD_ALLOC(retval, strlen(value) + 1);
793         if (!retval) {
794                 CERROR("out of memory!\n");
795                 RETURN(NULL);
796         }
797
798         memcpy(retval, value, strlen(value)+1);
799         CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
800         RETURN(retval);
801 }
802
803 static inline int ll_set_opt(const char *opt, char *data, int fl)
804 {
805         if (strncmp(opt, data, strlen(opt)) != 0)
806                 return(0);
807         else
808                 return(fl);
809 }
810
811 /* non-client-specific mount options are parsed in lmd_parse */
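/* e.g. "flock,user_xattr,nochecksum": options are comma-separated and any
 * unrecognized option fails the mount with -EINVAL. */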
812 static int ll_options(char *options, int *flags)
813 {
814         int tmp;
815         char *s1 = options, *s2;
816         ENTRY;
817
818         if (!options) 
819                 RETURN(0);
820
821         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
822
823         while (*s1) {
824                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
825                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
826                 if (tmp) {
827                         *flags |= tmp;
828                         goto next;
829                 }
830                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
831                 if (tmp) {
832                         *flags |= tmp;
833                         goto next;
834                 }
835                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
836                 if (tmp) {
837                         *flags |= tmp;
838                         goto next;
839                 }
840                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
841                 if (tmp) {
842                         *flags &= ~tmp;
843                         goto next;
844                 }
845                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
846                 if (tmp) {
847                         *flags |= tmp;
848                         goto next;
849                 }
850                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
851                 if (tmp) {
852                         *flags &= ~tmp;
853                         goto next;
854                 }
855                 tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
856                 if (tmp) {
857                         /* Ignore deprecated mount option.  The client will
858                          * always try to mount with ACL support; whether it is
859                          * used depends on whether the server supports it. */
860                         goto next;
861                 }
862                 tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
863                 if (tmp) {
864                         goto next;
865                 }
866                 tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
867                 if (tmp) {
868                         *flags |= tmp;
869                         goto next;
870                 }
871
872                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
873                 if (tmp) {
874                         *flags |= tmp;
875                         goto next;
876                 }
877                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
878                 if (tmp) {
879                         *flags &= ~tmp;
880                         goto next;
881                 }
882                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
883                 if (tmp) {
884                         *flags |= tmp;
885                         goto next;
886                 }
887                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
888                 if (tmp) {
889                         *flags &= ~tmp;
890                         goto next;
891                 }
892
893                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
894                                    s1);
895                 RETURN(-EINVAL);
896
897 next:
898                 /* Find next opt */
899                 s2 = strchr(s1, ',');
900                 if (s2 == NULL)
901                         break;
902                 s1 = s2 + 1;
903         }
904         RETURN(0);
905 }
906
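/* Initialize a freshly allocated ll_inode_info: semaphores, lists, open
 * handle counters and the LLI_INODE_MAGIC marker checked elsewhere. */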
907 void ll_lli_init(struct ll_inode_info *lli)
908 {
909         lli->lli_inode_magic = LLI_INODE_MAGIC;
910         sema_init(&lli->lli_size_sem, 1);
911         sema_init(&lli->lli_write_sem, 1);
912         lli->lli_flags = 0;
913         lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
914         spin_lock_init(&lli->lli_lock);
915         INIT_LIST_HEAD(&lli->lli_pending_write_llaps);
916         INIT_LIST_HEAD(&lli->lli_close_list);
917         lli->lli_inode_magic = LLI_INODE_MAGIC;
918         sema_init(&lli->lli_och_sem, 1);
919         lli->lli_mds_read_och = lli->lli_mds_write_och = NULL;
920         lli->lli_mds_exec_och = NULL;
921         lli->lli_open_fd_read_count = lli->lli_open_fd_write_count = 0;
922         lli->lli_open_fd_exec_count = 0;
923         INIT_LIST_HEAD(&lli->lli_dead_list);
924         lli->lli_remote_perms = NULL;
925         lli->lli_rmtperm_utime = 0;
926         sema_init(&lli->lli_rmtperm_sem, 1);
927         INIT_LIST_HEAD(&lli->lli_oss_capas);
928 }
929
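/* Client mount entry point: parse the client mount options, process the MGS
 * config log for this profile to set up the mdc/osc obds, then call
 * client_common_fill_super() to connect and build the root inode. */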
930 int ll_fill_super(struct super_block *sb)
931 {
932         struct lustre_profile *lprof;
933         struct lustre_sb_info *lsi = s2lsi(sb);
934         struct ll_sb_info *sbi;
935         char  *dt = NULL, *md = NULL;
936         char  *profilenm = get_profile_name(sb);
937         struct config_llog_instance cfg = {0, };
938         char   ll_instance[sizeof(sb) * 2 + 1];
939         int    err;
940         ENTRY;
941
942         CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
943
944         cfs_module_get();
945
946         /* client additional sb info */
947         lsi->lsi_llsbi = sbi = ll_init_sbi();
948         if (!sbi) {
949                 cfs_module_put();
950                 RETURN(-ENOMEM);
951         }
952
953         err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
954         if (err) 
955                 GOTO(out_free, err);
956
957         /* Generate a string unique to this super, in case some joker tries
958            to mount the same fs at two mount points.
959            Use the address of the super itself.*/
960         sprintf(ll_instance, "%p", sb);
961         cfg.cfg_instance = ll_instance;
962         cfg.cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
963
964         /* set up client obds */
965         err = lustre_process_log(sb, profilenm, &cfg);
966         if (err < 0) {
967                 CERROR("Unable to process log: %d\n", err);
968                 GOTO(out_free, err);
969         }
970
971         lprof = class_get_profile(profilenm);
972         if (lprof == NULL) {
973                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
974                                    " read from the MGS.  Does that filesystem "
975                                    "exist?\n", profilenm);
976                 GOTO(out_free, err = -EINVAL);
977         }
978         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
979                lprof->lp_md, lprof->lp_dt);
980
981         OBD_ALLOC(dt, strlen(lprof->lp_dt) +
982                   strlen(ll_instance) + 2);
983         if (!dt)
984                 GOTO(out_free, err = -ENOMEM);
985         sprintf(dt, "%s-%s", lprof->lp_dt, ll_instance);
986
987         OBD_ALLOC(md, strlen(lprof->lp_md) +
988                   strlen(ll_instance) + 2);
989         if (!md)
990                 GOTO(out_free, err = -ENOMEM);
991         sprintf(md, "%s-%s", lprof->lp_md, ll_instance);
992
993         /* connections, registrations, sb setup */
994         err = client_common_fill_super(sb, md, dt);
995
996 out_free:
997         if (md)
998                 OBD_FREE(md, strlen(md) + 1);
999         if (dt)
1000                 OBD_FREE(dt, strlen(dt) + 1);
1001         if (err) 
1002                 ll_put_super(sb);
1003         else
1004                 LCONSOLE_WARN("Client %s has started\n", profilenm);        
1005
1006         RETURN(err);
1007 } /* ll_fill_super */
1008
1009
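/* Unmount: end config log watching, propagate obd_force to every device in
 * this super's group, tear down the common client state and manually clean
 * up the obds created for this mount. */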
1010 void ll_put_super(struct super_block *sb)
1011 {
1012         struct config_llog_instance cfg;
1013         char   ll_instance[sizeof(sb) * 2 + 1];
1014         struct obd_device *obd;
1015         struct lustre_sb_info *lsi = s2lsi(sb);
1016         struct ll_sb_info *sbi = ll_s2sbi(sb);
1017         char *profilenm = get_profile_name(sb);
1018         int force = 1, next;
1019         ENTRY;
1020
1021         CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
1022
1023         ll_print_capa_stat(sbi);
1024
1025         sprintf(ll_instance, "%p", sb);
1026         cfg.cfg_instance = ll_instance;
1027         lustre_end_log(sb, NULL, &cfg);
1028         
1029         if (sbi->ll_md_exp) {
1030                 obd = class_exp2obd(sbi->ll_md_exp);
1031                 if (obd) 
1032                         force = obd->obd_force;
1033         }
1034         
1035         /* We need to set force before the lov_disconnect in 
1036            lustre_common_put_super, since l_d cleans up osc's as well. */
1037         if (force) {
1038                 next = 0;
1039                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1040                                                      &next)) != NULL) {
1041                         obd->obd_force = force;
1042                 }
1043         }                       
1044
1045         if (sbi->ll_lcq) {
1046                 /* Only if client_common_fill_super succeeded */
1047                 client_common_put_super(sb);
1048         }
1049         next = 0;
1050         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) !=NULL) {
1051                 class_manual_cleanup(obd);
1052         }
1053
1054         if (profilenm)
1055                 class_del_profile(profilenm);
1056
1057         ll_free_sbi(sb);
1058         lsi->lsi_llsbi = NULL;
1059
1060         lustre_common_put_super(sb);
1061
1062         LCONSOLE_WARN("client %s umount complete\n", ll_instance);
1063         
1064         cfs_module_put();
1065
1066         EXIT;
1067 } /* client_put_super */
1068
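/* On kernels with register_cache(), provide a shrink callback that trims the
 * llap page cache of every mounted client under memory pressure. */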
1069 #ifdef HAVE_REGISTER_CACHE
1070 #include <linux/cache_def.h>
1071 #ifdef HAVE_CACHE_RETURN_INT
1072 static int
1073 #else
1074 static void
1075 #endif
1076 ll_shrink_cache(int priority, unsigned int gfp_mask)
1077 {
1078         struct ll_sb_info *sbi;
1079         int count = 0;
1080
1081         list_for_each_entry(sbi, &ll_super_blocks, ll_list)
1082                 count += llap_shrink_cache(sbi, priority);
1083
1084 #ifdef HAVE_CACHE_RETURN_INT
1085         return count;
1086 #endif
1087 }
1088
1089 struct cache_definition ll_cache_definition = {
1090         .name = "llap_cache",
1091         .shrink = ll_shrink_cache
1092 };
1093 #endif /* HAVE_REGISTER_CACHE */
1094
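/* Map an ldlm lock back to its VFS inode via l_ast_data, taking an inode
 * reference with igrab(); returns NULL (and complains) if the stored pointer
 * no longer looks like a live lustre inode. */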
1095 struct inode *ll_inode_from_lock(struct ldlm_lock *lock)
1096 {
1097         struct inode *inode = NULL;
1098         /* NOTE: we depend on atomic igrab() -bzzz */
1099         lock_res_and_lock(lock);
1100         if (lock->l_ast_data) {
1101                 struct ll_inode_info *lli = ll_i2info(lock->l_ast_data);
1102                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1103                         inode = igrab(lock->l_ast_data);
1104                 } else {
1105                         inode = lock->l_ast_data;
1106                         ldlm_lock_debug(NULL, inode->i_state & I_FREEING ?
1107                                                 D_INFO : D_WARNING,
1108                                         lock, __FILE__, __func__, __LINE__,
1109                                         "l_ast_data %p is bogus: magic %08x",
1110                                         lock->l_ast_data, lli->lli_inode_magic);
1111                         inode = NULL;
1112                 }
1113         }
1114         unlock_res_and_lock(lock);
1115         return inode;
1116 }
1117
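/* ldlm iterator callback: clear l_ast_data on locks that still point at the
 * inode being torn down. */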
1118 static int null_if_equal(struct ldlm_lock *lock, void *data)
1119 {
1120         if (data == lock->l_ast_data) {
1121                 lock->l_ast_data = NULL;
1122
1123                 if (lock->l_req_mode != lock->l_granted_mode)
1124                         LDLM_ERROR(lock, "clearing inode with ungranted lock");
1125         }
1126
1127         return LDLM_ITER_CONTINUE;
1128 }
1129
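/* Called when the VFS drops the inode: detach it from any ldlm locks, close
 * cached MDS open handles, and free the striping md, symlink name, ACLs,
 * remote permissions and capabilities. */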
1130 void ll_clear_inode(struct inode *inode)
1131 {
1132         struct ll_inode_info *lli = ll_i2info(inode);
1133         struct ll_sb_info *sbi = ll_i2sbi(inode);
1134         ENTRY;
1135
1136         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1137                inode->i_generation, inode);
1138
1139         if (S_ISDIR(inode->i_mode)) {
1140                 /* these should have been cleared in ll_file_release */
1141                 LASSERT(lli->lli_sai == NULL);
1142                 LASSERT(lli->lli_opendir_key == NULL);
1143                 LASSERT(lli->lli_opendir_pid == 0);
1144         }
1145
1146         ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1147         md_change_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
1148                          null_if_equal, inode);
1149
1150         LASSERT(!lli->lli_open_fd_write_count);
1151         LASSERT(!lli->lli_open_fd_read_count);
1152         LASSERT(!lli->lli_open_fd_exec_count);
1153
1154         if (lli->lli_mds_write_och)
1155                 ll_md_real_close(inode, FMODE_WRITE);
1156         if (lli->lli_mds_exec_och)
1157                 ll_md_real_close(inode, FMODE_EXEC);
1158         if (lli->lli_mds_read_och)
1159                 ll_md_real_close(inode, FMODE_READ);
1160
1161         if (lli->lli_smd) {
1162                 obd_change_cbdata(sbi->ll_dt_exp, lli->lli_smd,
1163                                   null_if_equal, inode);
1164
1165                 obd_free_memmd(sbi->ll_dt_exp, &lli->lli_smd);
1166                 lli->lli_smd = NULL;
1167         }
1168
1169         if (lli->lli_symlink_name) {
1170                 OBD_FREE(lli->lli_symlink_name,
1171                          strlen(lli->lli_symlink_name) + 1);
1172                 lli->lli_symlink_name = NULL;
1173         }
1174
1175         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1176                 LASSERT(lli->lli_posix_acl == NULL);
1177                 if (lli->lli_remote_perms) {
1178                         free_rmtperm_hash(lli->lli_remote_perms);
1179                         lli->lli_remote_perms = NULL;
1180                 }
1181         }
1182 #ifdef CONFIG_FS_POSIX_ACL
1183         else if (lli->lli_posix_acl) {
1184                 LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
1185                 LASSERT(lli->lli_remote_perms == NULL);
1186                 posix_acl_release(lli->lli_posix_acl);
1187                 lli->lli_posix_acl = NULL;
1188         }
1189 #endif
1190         lli->lli_inode_magic = LLI_INODE_DEAD;
1191
1192 #ifdef HAVE_EXPORT___IGET
1193         spin_lock(&sbi->ll_deathrow_lock);
1194         list_del_init(&lli->lli_dead_list);
1195         spin_unlock(&sbi->ll_deathrow_lock);
1196 #endif
1197         ll_clear_inode_capas(inode);
1198
1199         EXIT;
1200 }
1201
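/* Send the attribute update to the MDS and apply the reply (timestamps,
 * epoch data) to the local inode. */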
1202 int ll_md_setattr(struct inode *inode, struct md_op_data *op_data,
1203                   struct md_open_data **mod)
1204 {
1205         struct lustre_md md;
1206         struct ll_sb_info *sbi = ll_i2sbi(inode);
1207         struct ptlrpc_request *request = NULL;
1208         int rc;
1209         ENTRY;
1210         
1211         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0, 
1212                                      LUSTRE_OPC_ANY, NULL);
1213         if (IS_ERR(op_data))
1214                 RETURN(PTR_ERR(op_data));
1215
1216         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0, 
1217                         &request, mod);
1218         if (rc) {
1219                 ptlrpc_req_finished(request);
1220                 if (rc == -ENOENT) {
1221                         inode->i_nlink = 0;
1222                         /* Unlinked special device node? Or just a race?
1223                          * Pretend we have done everything. */
1224                         if (!S_ISREG(inode->i_mode) &&
1225                             !S_ISDIR(inode->i_mode))
1226                                 rc = inode_setattr(inode, &op_data->op_attr);
1227                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1228                         CERROR("md_setattr fails: rc = %d\n", rc);
1229                 }
1230                 RETURN(rc);
1231         }
1232
1233         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1234                               sbi->ll_md_exp, &md);
1235         if (rc) {
1236                 ptlrpc_req_finished(request);
1237                 RETURN(rc);
1238         }
1239
1240         /* We call inode_setattr to adjust timestamps.
1241          * If there is at least some data in the file, we cleared ATTR_SIZE
1242          * above to avoid invoking vmtruncate, otherwise it is important
1243          * to call vmtruncate in inode_setattr to update inode->i_size
1244          * (bug 6196) */
1245         rc = inode_setattr(inode, &op_data->op_attr);
1246
1247         /* Extract epoch data if obtained. */
1248         op_data->op_handle = md.body->handle;
1249         op_data->op_ioepoch = md.body->ioepoch;
1250
1251         ll_update_inode(inode, &md);
1252         ptlrpc_req_finished(request);
1253
1254         RETURN(rc);
1255 }
1256
1257 /* Close IO epoch and send Size-on-MDS attribute update. */
1258 static int ll_setattr_done_writing(struct inode *inode,
1259                                    struct md_op_data *op_data,
1260                                    struct md_open_data *mod)
1261 {
1262         struct ll_inode_info *lli = ll_i2info(inode);
1263         int rc = 0;
1264         ENTRY;
1265         
1266         LASSERT(op_data != NULL);
1267         if (!S_ISREG(inode->i_mode))
1268                 RETURN(0);
1269
1270         CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID" for truncate\n",
1271                op_data->op_ioepoch, PFID(&lli->lli_fid));
1272
1273         op_data->op_flags = MF_EPOCH_CLOSE | MF_SOM_CHANGE;
1274         rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1275         if (rc == -EAGAIN) {
1276                 /* MDS has instructed us to obtain Size-on-MDS attribute
1277                  * from OSTs and send the setattr back to the MDS. */
1278                 rc = ll_sizeonmds_update(inode, mod, &op_data->op_handle,
1279                                          op_data->op_ioepoch);
1280         } else if (rc) {
1281                 CERROR("inode %lu mdc truncate failed: rc = %d\n",
1282                        inode->i_ino, rc);
1283         }
1284         RETURN(rc);
1285 }
1286
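/* Truncate path of ll_setattr_raw(): take a PW extent lock covering
 * [new_size, EOF] (or do a lockless server-side punch when lockless truncate
 * is enabled), then vmtruncate the local inode. */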
1287 static int ll_setattr_do_truncate(struct inode *inode, loff_t new_size)
1288 {
1289         struct ll_sb_info *sbi = ll_i2sbi(inode);
1290         struct ll_inode_info *lli = ll_i2info(inode);
1291         struct lov_stripe_md *lsm = lli->lli_smd;
1292         int rc;
1293         ldlm_policy_data_t policy = { .l_extent = {new_size,
1294                                                    OBD_OBJECT_EOF } };
1295         struct lustre_handle lockh = { 0 };
1296         int local_lock = 0; /* 0 - no local lock;
1297                              * 1 - lock taken by ll_extent_lock;
1298                              * 2 - by obd_match */
1299         int ast_flags;
1300         int err;
1301         ENTRY;
1302
1303         UNLOCK_INODE_MUTEX(inode);
1304         UP_WRITE_I_ALLOC_SEM(inode);
1305
1306         if (sbi->ll_lockless_truncate_enable &&
1307             (sbi->ll_lco.lco_flags & OBD_CONNECT_TRUNCLOCK)) {
1308                 ast_flags = LDLM_FL_BLOCK_GRANTED;
1309                 rc = obd_match(sbi->ll_dt_exp, lsm, LDLM_EXTENT,
1310                                &policy, LCK_PW, &ast_flags, inode, &lockh);
1311                 if (rc > 0) {
1312                         local_lock = 2;
1313                         rc = 0;
1314                 } else if (rc == 0) {
1315                         rc = ll_file_punch(inode, new_size, 1);
1316                 }
1317         } else {
1318                 /* XXX when we fix the AST intents to pass the discard-range
1319                  * XXX extent, make ast_flags always LDLM_AST_DISCARD_DATA
1320                  * XXX here. */
1321                 ast_flags = (new_size == 0) ? LDLM_AST_DISCARD_DATA : 0;
1322                 rc = ll_extent_lock(NULL, inode, lsm, LCK_PW, &policy,
1323                                     &lockh, ast_flags);
1324                 if (likely(rc == 0))
1325                         local_lock = 1;
1326         }
1327
1328 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1329         DOWN_WRITE_I_ALLOC_SEM(inode);
1330         LOCK_INODE_MUTEX(inode);
1331 #else
1332         LOCK_INODE_MUTEX(inode);
1333         DOWN_WRITE_I_ALLOC_SEM(inode);
1334 #endif
1335         if (likely(rc == 0)) {
1336                 /* Only ll_inode_size_lock is taken at this level.
1337                  * lov_stripe_lock() is grabbed by ll_truncate() only over
1338                  * call to obd_adjust_kms().  If vmtruncate returns 0, then
1339                  * ll_truncate dropped ll_inode_size_lock() */
1340                 ll_inode_size_lock(inode, 0);
1341                 if (!local_lock) {
1342                         spin_lock(&lli->lli_lock);
1343                         lli->lli_flags |= LLIF_SRVLOCK;
1344                         spin_unlock(&lli->lli_lock);
1345                 }
1346                 rc = vmtruncate(inode, new_size);
1347                 if (!local_lock) {
1348                         spin_lock(&lli->lli_lock);
1349                         lli->lli_flags &= ~LLIF_SRVLOCK;
1350                         spin_unlock(&lli->lli_lock);
1351                 }
1352                 if (rc != 0) {
1353                         LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
1354                         ll_inode_size_unlock(inode, 0);
1355                 }
1356         }
1357
1358         if (local_lock) {
1359                 if (local_lock == 2)
1360                         err = obd_cancel(sbi->ll_dt_exp, lsm, LCK_PW, &lockh);
1361                 else
1362                         err = ll_extent_unlock(NULL, inode, lsm, LCK_PW, &lockh);
1363                 if (unlikely(err != 0)){
1364                         CERROR("extent unlock failed: err=%d,"
1365                                " unlock method =%d\n", err, local_lock);
1366                         if (rc == 0)
1367                                 rc = err;
1368                 }
1369         }
1370         RETURN(rc);
1371 }
1372
1373 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1374  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1375  * keep these values until such a time that objects are allocated for it.
1376  * We do the MDS operations first, as it is checking permissions for us.
1377  * We don't do the MDS RPC if there is nothing that we want to store there;
1378  * otherwise there is no harm in updating mtime/atime on the MDS if we are
1379  * going to do an RPC anyway.
1380  *
1381  * If we are doing a truncate, we will send the mtime and ctime updates
1382  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1383  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1384  * at the same time.
1385  */
1386 int ll_setattr_raw(struct inode *inode, struct iattr *attr)
1387 {
1388         struct ll_inode_info *lli = ll_i2info(inode);
1389         struct lov_stripe_md *lsm = lli->lli_smd;
1390         struct ll_sb_info *sbi = ll_i2sbi(inode);
1391         struct md_op_data *op_data = NULL;
1392         struct md_open_data *mod = NULL;
1393         int ia_valid = attr->ia_valid;
1394         int rc = 0, rc1 = 0;
1395         ENTRY;
1396
1397         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu valid %x\n", inode->i_ino,
1398                attr->ia_valid);
1399         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETATTR, 1);
1400
1401         if (ia_valid & ATTR_SIZE) {
1402                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1403                         CDEBUG(D_INODE, "file too large %llu > "LPU64"\n",
1404                                attr->ia_size, ll_file_maxbytes(inode));
1405                         RETURN(-EFBIG);
1406                 }
1407
1408                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME | ATTR_TRUNC;
1409         }
1410
1411         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1412         if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
1413                 if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
1414                         RETURN(-EPERM);
1415         }
1416
1417         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1418         if (attr->ia_valid & ATTR_CTIME) {
1419                 attr->ia_ctime = CURRENT_TIME;
1420                 attr->ia_valid |= ATTR_CTIME_SET;
1421         }
1422         if (!(ia_valid & ATTR_ATIME_SET) && (attr->ia_valid & ATTR_ATIME)) {
1423                 attr->ia_atime = CURRENT_TIME;
1424                 attr->ia_valid |= ATTR_ATIME_SET;
1425         }
1426         if (!(ia_valid & ATTR_MTIME_SET) && (attr->ia_valid & ATTR_MTIME)) {
1427                 attr->ia_mtime = CURRENT_TIME;
1428                 attr->ia_valid |= ATTR_MTIME_SET;
1429         }
1430         if ((attr->ia_valid & ATTR_CTIME) && !(attr->ia_valid & ATTR_MTIME)) {
1431                 /* To avoid a stale mtime on the MDS, obtain it from the
1432                    OST and send it to the MDS. */
1433                 rc = ll_glimpse_size(inode, 0);
1434                 if (rc)
1435                         RETURN(rc);
1436
1437                 attr->ia_valid |= ATTR_MTIME_SET | ATTR_MTIME;
1438                 attr->ia_mtime = inode->i_mtime;
1439         }
1440
1441         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1442                 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
1443                        LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1444                        cfs_time_current_sec());
1445
1446         /* NB: ATTR_SIZE will only be set after this point if the size
1447          * resides on the MDS, i.e., this file has no objects. */
1448         if (lsm)
1449                 attr->ia_valid &= ~ATTR_SIZE;
1450
1451         /* We always do an MDS RPC, even if we're only changing the size;
1452          * only the MDS knows whether truncate() should fail with -ETXTBSY */
1453
1454         OBD_ALLOC_PTR(op_data);
1455         if (op_data == NULL)
1456                 RETURN(-ENOMEM);
1457
1458         memcpy(&op_data->op_attr, attr, sizeof(*attr));
1459
1460         /* Open epoch for truncate. */
1461         if ((ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) &&
1462             (ia_valid & ATTR_SIZE))
1463                 op_data->op_flags = MF_EPOCH_OPEN;
1464
1465         rc = ll_md_setattr(inode, op_data, &mod);
1466         if (rc)
1467                 GOTO(out, rc);
1468
1469         if (op_data->op_ioepoch)
1470                 CDEBUG(D_INODE, "Epoch "LPU64" opened on "DFID" for "
1471                        "truncate\n", op_data->op_ioepoch, PFID(&lli->lli_fid));
1472
1473         if (!lsm || !S_ISREG(inode->i_mode)) {
1474                 CDEBUG(D_INODE, "no lsm: not setting attrs on OST\n");
1475                 GOTO(out, rc = 0);
1476         }
1477
1478         /* We really need to get our PW lock before we change inode->i_size.
1479          * If we don't, we can race with other i_size updaters on our node,
1480          * like ll_file_read.  We can also race with i_size propagation to
1481          * other nodes through dirtying and writeback of final cached pages.
1482          * This last one is especially bad for racing O_APPEND users on
1483          * other nodes. */
1484         if (ia_valid & ATTR_SIZE) {
1485                 rc = ll_setattr_do_truncate(inode, attr->ia_size);
1486         } else if (ia_valid & (ATTR_MTIME | ATTR_MTIME_SET)) {
1487                 obd_flag flags;
1488                 struct obd_info oinfo = { { { 0 } } };
1489                 struct obdo *oa;
1490
1491                 CDEBUG(D_INODE, "set mtime on OST inode %lu to %lu\n",
1492                        inode->i_ino, LTIME_S(attr->ia_mtime));
1493
1494                 OBDO_ALLOC(oa);
1495                 if (oa) {
1496                         oa->o_id = lsm->lsm_object_id;
1497                         oa->o_gr = lsm->lsm_object_gr;
1498                         oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
1499
1500                         flags = OBD_MD_FLTYPE | OBD_MD_FLATIME |
1501                                 OBD_MD_FLMTIME | OBD_MD_FLCTIME |
1502                                 OBD_MD_FLFID | OBD_MD_FLGENER |
1503                                 OBD_MD_FLGROUP;
1504
1505                         obdo_from_inode(oa, inode, flags);
1506
1507                         oinfo.oi_oa = oa;
1508                         oinfo.oi_md = lsm;
1509                         oinfo.oi_capa = ll_mdscapa_get(inode);
1510
1511                         /* XXX: this looks unnecessary now. */
1512                         rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1513                         capa_put(oinfo.oi_capa);
1514                         if (rc)
1515                                 CERROR("obd_setattr_rqset fails: rc=%d\n", rc);
1516                         OBDO_FREE(oa);
1517                 } else {
1518                         rc = -ENOMEM;
1519                 }
1520         }
1521         EXIT;
1522 out:
1523         if (op_data) {
1524                 if (op_data->op_ioepoch)
1525                         rc1 = ll_setattr_done_writing(inode, op_data, mod);
1526                 ll_finish_md_op_data(op_data);
1527         }
1528         return rc ? rc : rc1;
1529 }
1530
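/*
 * VFS ->setattr entry point, a thin wrapper around ll_setattr_raw().  The
 * first check piggy-backs MDS_OPEN_OWNEROVERRIDE onto ia_valid when ctime,
 * size and mode all change at once, presumably so the MDS lets the owner
 * complete the size change without a full permission re-check.  The second
 * check sets ATTR_FORCE when a combined size+mode change clears previously
 * set SUID/SGID bits, so the mode update is not refused; this mirrors the
 * usual clear-setuid-on-truncate behaviour.
 */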
1531 int ll_setattr(struct dentry *de, struct iattr *attr)
1532 {
1533         int mode;
1534
1535         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1536             (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1537                 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1538         if ((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
1539             (ATTR_SIZE|ATTR_MODE)) {
1540                 mode = de->d_inode->i_mode;
1541                 if (((mode & S_ISUID) && (!(attr->ia_mode & S_ISUID))) ||
1542                     ((mode & S_ISGID) && (mode & S_IXGRP) &&
1543                     (!(attr->ia_mode & S_ISGID))))
1544                         attr->ia_valid |= ATTR_FORCE;
1545         }
1546
1547         return ll_setattr_raw(de->d_inode, attr);
1548 }
1549
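/* Aggregate statfs for the client: statistics are fetched from the MDS
 * first (file and object counts), then from the OST pool via
 * obd_statfs_rqset() (block counts), and the two are merged so that df
 * reports MDS inode usage against OST space. */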
1550 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1551                        __u64 max_age, __u32 flags)
1552 {
1553         struct ll_sb_info *sbi = ll_s2sbi(sb);
1554         struct obd_statfs obd_osfs;
1555         int rc;
1556         ENTRY;
1557
1558         rc = obd_statfs(class_exp2obd(sbi->ll_md_exp), osfs, max_age, flags);
1559         if (rc) {
1560                 CERROR("md_statfs fails: rc = %d\n", rc);
1561                 RETURN(rc);
1562         }
1563
1564         osfs->os_type = sb->s_magic;
1565
1566         CDEBUG(D_SUPER, "MDC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1567                osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,osfs->os_files);
1568
1569         rc = obd_statfs_rqset(class_exp2obd(sbi->ll_dt_exp),
1570                               &obd_osfs, max_age, flags);
1571         if (rc) {
1572                 CERROR("obd_statfs fails: rc = %d\n", rc);
1573                 RETURN(rc);
1574         }
1575
1576         CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1577                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1578                obd_osfs.os_files);
1579
1580         osfs->os_bsize = obd_osfs.os_bsize;
1581         osfs->os_blocks = obd_osfs.os_blocks;
1582         osfs->os_bfree = obd_osfs.os_bfree;
1583         osfs->os_bavail = obd_osfs.os_bavail;
1584
1585         /* If we don't have as many objects free on the OST as inodes
1586          * on the MDS, we reduce the total number of inodes to
1587          * compensate, so that the "inodes in use" number is correct.
1588          */
1589         if (obd_osfs.os_ffree < osfs->os_ffree) {
1590                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1591                         obd_osfs.os_ffree;
1592                 osfs->os_ffree = obd_osfs.os_ffree;
1593         }
1594
1595         RETURN(rc);
1596 }
1597 #ifndef HAVE_STATFS_DENTRY_PARAM
1598 int ll_statfs(struct super_block *sb, struct kstatfs *sfs)
1599 {
1600 #else
1601 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1602 {
1603         struct super_block *sb = de->d_sb;
1604 #endif
1605         struct obd_statfs osfs;
1606         int rc;
1607
1608         CDEBUG(D_VFSTRACE, "VFS Op: at "LPU64" jiffies\n", get_jiffies_64());
1609         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1610
1611         /* For now we will always get up-to-date statfs values, but in the
1612          * future we may allow some amount of caching on the client (e.g.
1613          * from QOS or lprocfs updates). */
1614         rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - 1, 0);
1615         if (rc)
1616                 return rc;
1617
1618         statfs_unpack(sfs, &osfs);
1619
1620         /* We need to downshift for all 32-bit kernels, because we can't
1621          * tell if the kernel is being called via sys_statfs64() or not.
1622          * Stop before overflowing f_bsize; past that point it is better
1623          * to just risk EOVERFLOW if the caller uses the old sys_statfs(). */
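        /* Illustrative example: on a 32-bit kernel, a filesystem with 2^33
         * blocks of 4096 bytes is shifted twice and reported as 2^31 blocks
         * of 16384 bytes, which keeps the 2^45-byte total while fitting
         * f_blocks into 32 bits. */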
1624         if (sizeof(long) < 8) {
1625                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1626                         sfs->f_bsize <<= 1;
1627
1628                         osfs.os_blocks >>= 1;
1629                         osfs.os_bfree >>= 1;
1630                         osfs.os_bavail >>= 1;
1631                 }
1632         }
1633
1634         sfs->f_blocks = osfs.os_blocks;
1635         sfs->f_bfree = osfs.os_bfree;
1636         sfs->f_bavail = osfs.os_bavail;
1637
1638         return 0;
1639 }
1640
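/* Client-side i_size is protected by lli_size_sem.  These helpers take and
 * release that semaphore, record the owning task so that recursive locking
 * trips the LASSERT, and optionally take the LOV stripe lock when the caller
 * passes lock_lsm != 0 (which requires lli_smd to be set). */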
1641 void ll_inode_size_lock(struct inode *inode, int lock_lsm)
1642 {
1643         struct ll_inode_info *lli;
1644         struct lov_stripe_md *lsm;
1645
1646         lli = ll_i2info(inode);
1647         LASSERT(lli->lli_size_sem_owner != current);
1648         down(&lli->lli_size_sem);
1649         LASSERT(lli->lli_size_sem_owner == NULL);
1650         lli->lli_size_sem_owner = current;
1651         lsm = lli->lli_smd;
1652         LASSERTF(lsm != NULL || lock_lsm == 0, "lsm %p, lock_lsm %d\n",
1653                  lsm, lock_lsm);
1654         if (lock_lsm)
1655                 lov_stripe_lock(lsm);
1656 }
1657
1658 void ll_inode_size_unlock(struct inode *inode, int unlock_lsm)
1659 {
1660         struct ll_inode_info *lli;
1661         struct lov_stripe_md *lsm;
1662
1663         lli = ll_i2info(inode);
1664         lsm = lli->lli_smd;
1665         LASSERTF(lsm != NULL || unlock_lsm == 0, "lsm %p, unlock_lsm %d\n",
1666                  lsm, unlock_lsm);
1667         if (unlock_lsm)
1668                 lov_stripe_unlock(lsm);
1669         LASSERT(lli->lli_size_sem_owner == current);
1670         lli->lli_size_sem_owner = NULL;
1671         up(&lli->lli_size_sem);
1672 }
1673
1674 static void ll_replace_lsm(struct inode *inode, struct lov_stripe_md *lsm)
1675 {
1676         struct ll_inode_info *lli = ll_i2info(inode);
1677
1678         dump_lsm(D_INODE, lsm);
1679         dump_lsm(D_INODE, lli->lli_smd);
1680         LASSERTF(lsm->lsm_magic == LOV_MAGIC_JOIN,
1681                  "lsm must be joined lsm %p\n", lsm);
1682         obd_free_memmd(ll_i2dtexp(inode), &lli->lli_smd);
1683         CDEBUG(D_INODE, "replace lsm %p to lli_smd %p for inode %lu/%u(%p)\n",
1684                lsm, lli->lli_smd, inode->i_ino, inode->i_generation, inode);
1685         lli->lli_smd = lsm;
1686         lli->lli_maxbytes = lsm->lsm_maxbytes;
1687         if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
1688                 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
1689 }
1690
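/* Copy the attributes (and any striping metadata) from a lustre_md reply
 * into the VFS inode.  A new stripe MD is installed if the inode has none;
 * an existing one is validated against the reply, or replaced via
 * ll_replace_lsm() in the LOV_MAGIC_JOIN case. */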
1691 void ll_update_inode(struct inode *inode, struct lustre_md *md)
1692 {
1693         struct ll_inode_info *lli = ll_i2info(inode);
1694         struct mdt_body *body = md->body;
1695         struct lov_stripe_md *lsm = md->lsm;
1696         struct ll_sb_info *sbi = ll_i2sbi(inode);
1697
1698         LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
1699         if (lsm != NULL) {
1700                 if (lli->lli_smd == NULL) {
1701                         if (lsm->lsm_magic != LOV_MAGIC &&
1702                             lsm->lsm_magic != LOV_MAGIC_JOIN) {
1703                                 dump_lsm(D_ERROR, lsm);
1704                                 LBUG();
1705                         }
1706                         CDEBUG(D_INODE, "adding lsm %p to inode %lu/%u(%p)\n",
1707                                lsm, inode->i_ino, inode->i_generation, inode);
1708                         /* ll_inode_size_lock() requires that it only be
1709                          * called with lli_smd != NULL or lock_lsm == 0, or
1710                          * we can race between lock/unlock.  Bug 9547 */
1711                         lli->lli_smd = lsm;
1712                         lli->lli_maxbytes = lsm->lsm_maxbytes;
1713                         if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
1714                                 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
1715                 } else {
1716                         if (lli->lli_smd->lsm_magic == lsm->lsm_magic &&
1717                              lli->lli_smd->lsm_stripe_count ==
1718                                         lsm->lsm_stripe_count) {
1719                                 if (lov_stripe_md_cmp(lli->lli_smd, lsm)) {
1720                                         CERROR("lsm mismatch for inode %lu\n",
1721                                                 inode->i_ino);
1722                                         CERROR("lli_smd:\n");
1723                                         dump_lsm(D_ERROR, lli->lli_smd);
1724                                         CERROR("lsm:\n");
1725                                         dump_lsm(D_ERROR, lsm);
1726                                         LBUG();
1727                                 }
1728                         } else
1729                                 ll_replace_lsm(inode, lsm);
1730                 }
1731                 if (lli->lli_smd != lsm)
1732                         obd_free_memmd(ll_i2dtexp(inode), &lsm);
1733         }
1734
1735         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1736                 if (body->valid & OBD_MD_FLRMTPERM)
1737                         ll_update_remote_perm(inode, md->remote_perm);
1738         }
1739 #ifdef CONFIG_FS_POSIX_ACL
1740         else if (body->valid & OBD_MD_FLACL) {
1741                 spin_lock(&lli->lli_lock);
1742                 if (lli->lli_posix_acl)
1743                         posix_acl_release(lli->lli_posix_acl);
1744                 lli->lli_posix_acl = md->posix_acl;
1745                 spin_unlock(&lli->lli_lock);
1746         }
1747 #endif
1748         inode->i_ino = ll_fid_build_ino(sbi, &body->fid1);
1749
1750         if (body->valid & OBD_MD_FLATIME &&
1751             body->atime > LTIME_S(inode->i_atime))
1752                 LTIME_S(inode->i_atime) = body->atime;
1753
1754         /* mtime is always updated with ctime, but can be set in the past.
1755            As write and utime(2) may happen within one second, and utime's
1756            mtime has priority over write's, take the mtime from the MDS
1757            when the ctimes are equal. */
1758         if (body->valid & OBD_MD_FLCTIME &&
1759             body->ctime >= LTIME_S(inode->i_ctime)) {
1760                 LTIME_S(inode->i_ctime) = body->ctime;
1761                 if (body->valid & OBD_MD_FLMTIME) {
1762                         CDEBUG(D_INODE, "setting ino %lu mtime "
1763                                "from %lu to "LPU64"\n", inode->i_ino, 
1764                                LTIME_S(inode->i_mtime), body->mtime);
1765                         LTIME_S(inode->i_mtime) = body->mtime;
1766                 }
1767         }
1768         if (body->valid & OBD_MD_FLMODE)
1769                 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
1770         if (body->valid & OBD_MD_FLTYPE)
1771                 inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
1772         if (S_ISREG(inode->i_mode)) {
1773                 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
1774         } else {
1775                 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1776         }
1777 #ifdef HAVE_INODE_BLKSIZE
1778         inode->i_blksize = 1<<inode->i_blkbits;
1779 #endif
1780         if (body->valid & OBD_MD_FLUID)
1781                 inode->i_uid = body->uid;
1782         if (body->valid & OBD_MD_FLGID)
1783                 inode->i_gid = body->gid;
1784         if (body->valid & OBD_MD_FLFLAGS)
1785                 inode->i_flags = ll_ext_to_inode_flags(body->flags);
1786         if (body->valid & OBD_MD_FLNLINK)
1787                 inode->i_nlink = body->nlink;
1788         if (body->valid & OBD_MD_FLRDEV)
1789                 inode->i_rdev = old_decode_dev(body->rdev);
1790
1791         if (body->valid & OBD_MD_FLID) {
1792                 /* FID shouldn't be changed! */
1793                 if (fid_is_sane(&lli->lli_fid)) {
1794                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
1795                                  "Trying to change FID "DFID
1796                                  " to the "DFID", inode %lu/%u(%p)\n",
1797                                  PFID(&lli->lli_fid), PFID(&body->fid1),
1798                                  inode->i_ino, inode->i_generation, inode);
1799                 } else
1800                         lli->lli_fid = body->fid1;
1801         }
1802
1803         LASSERT(fid_seq(&lli->lli_fid) != 0);
1804
1805         if (body->valid & OBD_MD_FLSIZE) {
1806                 if ((ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) &&
1807                     S_ISREG(inode->i_mode) && lli->lli_smd) {
1808                         struct lustre_handle lockh;
1809                         ldlm_mode_t mode;
1810
1811                         /* As it is possible a blocking AST has been processed
1812                          * by this time, we need to check that there is an
1813                          * UPDATE lock on the client and set LLIF_MDS_SIZE_LOCK
1814                          * while holding it. */
1815                         mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1816                                                &lockh);
1817                         if (mode) {
1818                                 if (lli->lli_flags & (LLIF_DONE_WRITING |
1819                                                       LLIF_EPOCH_PENDING |
1820                                                       LLIF_SOM_DIRTY)) {
1821                                         CERROR("ino %lu flags %lu still has "
1822                                                "size authority! do not trust "
1823                                                "the size sent by the MDS\n",
1824                                                inode->i_ino, lli->lli_flags);
1825                                 } else {
1826                                         /* Use old size assignment to avoid
1827                                          * deadlock bz14138 & bz14326 */
1828                                         inode->i_size = body->size;
1829                                         lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1830                                 }
1831                                 ldlm_lock_decref(&lockh, mode);
1832                         }
1833                 } else {
1834                         /* Use old size assignment to avoid
1835                          * deadlock bz14138 & bz14326 */
1836                         inode->i_size = body->size;
1837                 }
1838
1839                 if (body->valid & OBD_MD_FLBLOCKS)
1840                         inode->i_blocks = body->blocks;
1841         }
1842
1843         if (body->valid & OBD_MD_FLMDSCAPA) {
1844                 LASSERT(md->mds_capa);
1845                 ll_add_capa(inode, md->mds_capa);
1846         }
1847         if (body->valid & OBD_MD_FLOSSCAPA) {
1848                 LASSERT(md->oss_capa);
1849                 ll_add_capa(inode, md->oss_capa);
1850         }
1851 }
1852
1853 static struct backing_dev_info ll_backing_dev_info = {
1854         .ra_pages       = 0,    /* No readahead */
1855 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
1856         .capabilities   = 0,    /* Does contribute to dirty memory */
1857 #else
1858         .memory_backed  = 0,    /* Does contribute to dirty memory */
1859 #endif
1860 };
1861
1862 void ll_read_inode2(struct inode *inode, void *opaque)
1863 {
1864         struct lustre_md *md = opaque;
1865         struct ll_inode_info *lli = ll_i2info(inode);
1866         ENTRY;
1867
1868         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
1869                inode->i_ino, inode->i_generation, inode);
1870
1871         ll_lli_init(lli);
1872
1873         LASSERT(!lli->lli_smd);
1874
1875         /* Core attributes from the MDS first.  This is a new inode, and
1876          * the VFS doesn't zero times in the core inode so we have to do
1877          * it ourselves.  They will be overwritten by either MDS or OST
1878          * attributes - we just need to make sure they aren't newer. */
1879         LTIME_S(inode->i_mtime) = 0;
1880         LTIME_S(inode->i_atime) = 0;
1881         LTIME_S(inode->i_ctime) = 0;
1882         inode->i_rdev = 0;
1883         ll_update_inode(inode, md);
1884
1885         /* OIDEBUG(inode); */
1886
1887         if (S_ISREG(inode->i_mode)) {
1888                 struct ll_sb_info *sbi = ll_i2sbi(inode);
1889                 inode->i_op = &ll_file_inode_operations;
1890                 inode->i_fop = sbi->ll_fop;
1891                 inode->i_mapping->a_ops = &ll_aops;
1892                 EXIT;
1893         } else if (S_ISDIR(inode->i_mode)) {
1894                 inode->i_op = &ll_dir_inode_operations;
1895                 inode->i_fop = &ll_dir_operations;
1896                 inode->i_mapping->a_ops = &ll_dir_aops;
1897                 EXIT;
1898         } else if (S_ISLNK(inode->i_mode)) {
1899                 inode->i_op = &ll_fast_symlink_inode_operations;
1900                 EXIT;
1901         } else {
1902                 inode->i_op = &ll_special_inode_operations;
1903
1904                 init_special_inode(inode, inode->i_mode,
1905                                    kdev_t_to_nr(inode->i_rdev));
1906
1907                 /* initializing backing dev info. */
1908                 inode->i_mapping->backing_dev_info = &ll_backing_dev_info;
1909
1910                 EXIT;
1911         }
1912 }
1913
1914 void ll_delete_inode(struct inode *inode)
1915 {
1916         struct ll_sb_info *sbi = ll_i2sbi(inode);
1917         int rc;
1918         ENTRY;
1919
1920         rc = obd_fid_delete(sbi->ll_md_exp, ll_inode2fid(inode));
1921         if (rc) {
1922                 CERROR("fid_delete() failed, rc %d\n", rc);
1923         }
1924         truncate_inode_pages(&inode->i_data, 0);
1925         clear_inode(inode);
1926
1927         EXIT;
1928 }
1929
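/* ioctl helper for the ext2/ext3-style GETFLAGS/SETFLAGS commands.
 * GETFLAGS fetches the flags from the MDS with md_getattr(); SETFLAGS
 * pushes them to the MDS via md_setattr() and, when the file has objects,
 * mirrors them to the OSTs with obd_setattr_rqset(). */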
1930 int ll_iocontrol(struct inode *inode, struct file *file,
1931                  unsigned int cmd, unsigned long arg)
1932 {
1933         struct ll_sb_info *sbi = ll_i2sbi(inode);
1934         struct ptlrpc_request *req = NULL;
1935         int rc, flags = 0;
1936         ENTRY;
1937
1938         switch (cmd) {
1939         case EXT3_IOC_GETFLAGS: {
1940                 struct mdt_body *body;
1941                 struct obd_capa *oc;
1942
1943                 oc = ll_mdscapa_get(inode);
1944                 rc = md_getattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
1945                                 OBD_MD_FLFLAGS, 0, &req);
1946                 capa_put(oc);
1947                 if (rc) {
1948                         CERROR("failure %d inode %lu\n", rc, inode->i_ino);
1949                         RETURN(-abs(rc));
1950                 }
1951
1952                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1953
1954                 flags = body->flags;
1955
1956                 ptlrpc_req_finished(req);
1957
1958                 RETURN(put_user(flags, (int *)arg));
1959         }
1960         case EXT3_IOC_SETFLAGS: {
1961                 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1962                 struct obd_info oinfo = { { { 0 } } };
1963                 struct md_op_data *op_data;
1964
1965                 if (get_user(flags, (int *)arg))
1966                         RETURN(-EFAULT);
1967
1968                 oinfo.oi_md = lsm;
1969                 OBDO_ALLOC(oinfo.oi_oa);
1970                 if (!oinfo.oi_oa)
1971                         RETURN(-ENOMEM);
1972
1973                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1974                                              LUSTRE_OPC_ANY, NULL);
1975                 if (IS_ERR(op_data))
1976                         RETURN(PTR_ERR(op_data));
1977
1978                 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
1979                 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1980                 rc = md_setattr(sbi->ll_md_exp, op_data,
1981                                 NULL, 0, NULL, 0, &req, NULL);
1982                 ll_finish_md_op_data(op_data);
1983                 ptlrpc_req_finished(req);
1984                 if (rc || lsm == NULL) {
1985                         OBDO_FREE(oinfo.oi_oa);
1986                         RETURN(rc);
1987                 }
1988
1989                 oinfo.oi_oa->o_id = lsm->lsm_object_id;
1990                 oinfo.oi_oa->o_gr = lsm->lsm_object_gr;
1991                 oinfo.oi_oa->o_flags = flags;
1992                 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
1993                                        OBD_MD_FLGROUP;
1994                 oinfo.oi_capa = ll_mdscapa_get(inode);
1995
1996                 obdo_from_inode(oinfo.oi_oa, inode,
1997                                 OBD_MD_FLFID | OBD_MD_FLGENER);
1998                 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1999                 capa_put(oinfo.oi_capa);
2000                 OBDO_FREE(oinfo.oi_oa);
2001                 if (rc) {
2002                         if (rc != -EPERM && rc != -EACCES)
2003                                 CERROR("obd_setattr_rqset fails: rc = %d\n", rc);
2004                         RETURN(rc);
2005                 }
2006
2007                 inode->i_flags = ll_ext_to_inode_flags(flags |
2008                                                        MDS_BFLAG_EXT_FLAGS);
2009                 RETURN(0);
2010         }
2011         default:
2012                 RETURN(-ENOSYS);
2013         }
2014
2015         RETURN(0);
2016 }
2017
2018 int ll_flush_ctx(struct inode *inode)
2019 {
2020         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2021
2022         CDEBUG(D_SEC, "flush context for user %d\n", current->uid);
2023
2024         obd_set_info_async(sbi->ll_md_exp,
2025                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2026                            0, NULL, NULL);
2027         obd_set_info_async(sbi->ll_dt_exp,
2028                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2029                            0, NULL, NULL);
2030         return 0;
2031 }
2032
2033 /* umount -f client means force down, don't save state */
2034 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
2035 void ll_umount_begin(struct vfsmount *vfsmnt, int flags)
2036 {
2037         struct super_block *sb = vfsmnt->mnt_sb;
2038 #else
2039 void ll_umount_begin(struct super_block *sb)
2040 {
2041 #endif
2042         struct lustre_sb_info *lsi = s2lsi(sb);
2043         struct ll_sb_info *sbi = ll_s2sbi(sb);
2044         struct obd_device *obd;
2045         struct obd_ioctl_data ioc_data = { 0 };
2046         ENTRY;
2047
2048 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
2049         if (!(flags & MNT_FORCE)) {
2050                 EXIT;
2051                 return;
2052         }
2053 #endif
2054
2055         /* Tell the MGC we got umount -f */
2056         lsi->lsi_flags |= LSI_UMOUNT_FORCE;
2057
2058         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2059                sb->s_count, atomic_read(&sb->s_active));
2060
2061         obd = class_exp2obd(sbi->ll_md_exp);
2062         if (obd == NULL) {
2063                 CERROR("Invalid MDC connection handle "LPX64"\n",
2064                        sbi->ll_md_exp->exp_handle.h_cookie);
2065                 EXIT;
2066                 return;
2067         }
2068         obd->obd_force = 1;
2069         obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp, sizeof ioc_data,
2070                       &ioc_data, NULL);
2071
2072         obd = class_exp2obd(sbi->ll_dt_exp);
2073         if (obd == NULL) {
2074                 CERROR("Invalid LOV connection handle "LPX64"\n",
2075                        sbi->ll_dt_exp->exp_handle.h_cookie);
2076                 EXIT;
2077                 return;
2078         }
2079
2080         obd->obd_force = 1;
2081         obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp, sizeof ioc_data,
2082                       &ioc_data, NULL);
2083
2084         /* Really, we'd like to wait until there are no requests outstanding,
2085          * and then continue.  For now, we just invalidate the requests,
2086          * schedule, and hope.
2087          */
2088         schedule();
2089
2090         EXIT;
2091 }
2092
2093 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2094 {
2095         struct ll_sb_info *sbi = ll_s2sbi(sb);
2096         int err;
2097         __u32 read_only;
2098
2099         if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2100                 read_only = *flags & MS_RDONLY;
2101                 err = obd_set_info_async(sbi->ll_md_exp,
2102                                          sizeof(KEY_READ_ONLY),
2103                                          KEY_READ_ONLY, sizeof(read_only),
2104                                          &read_only, NULL);
2105                 if (err) {
2106                         CERROR("Failed to change the read-only flag during "
2107                                "remount: %d\n", err);
2108                         return err;
2109                 }
2110
2111                 if (read_only)
2112                         sb->s_flags |= MS_RDONLY;
2113                 else
2114                         sb->s_flags &= ~MS_RDONLY;
2115         }
2116         return 0;
2117 }
2118
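/* Unpack a server reply into a lustre_md and either update an existing
 * inode (*inode != NULL) or instantiate a new one with ll_iget() using the
 * FID returned in the reply. */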
2119 int ll_prep_inode(struct inode **inode,
2120                   struct ptlrpc_request *req,
2121                   struct super_block *sb)
2122 {
2123         struct ll_sb_info *sbi = NULL;
2124         struct lustre_md md;
2125         int rc = 0;
2126         ENTRY;
2127
2128         LASSERT(*inode || sb);
2129         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2130         prune_deathrow(sbi, 1);
2131         memset(&md, 0, sizeof(struct lustre_md));
2132
2133         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2134                               sbi->ll_md_exp, &md);
2135         if (rc)
2136                 RETURN(rc);
2137
2138         if (*inode) {
2139                 ll_update_inode(*inode, &md);
2140         } else {
2141                 LASSERT(sb != NULL);
2142
2143                 /*
2144                  * At this point the server returns the same FID that the
2145                  * client generated at create time; using ->fid1 is okay here.
2146                  */
2147                 LASSERT(fid_is_sane(&md.body->fid1));
2148
2149                 *inode = ll_iget(sb, ll_fid_build_ino(sbi, &md.body->fid1), &md);
2150                 if (*inode == NULL || is_bad_inode(*inode)) {
2151                         if (md.lsm)
2152                                 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2153 #ifdef CONFIG_FS_POSIX_ACL
2154                         if (md.posix_acl) {
2155                                 posix_acl_release(md.posix_acl);
2156                                 md.posix_acl = NULL;
2157                         }
2158 #endif
2159                         rc = -ENOMEM;
2160                         CERROR("new_inode - fatal: rc %d\n", rc);
2161                         GOTO(out, rc);
2162                 }
2163         }
2164
2165         rc = obd_checkmd(sbi->ll_dt_exp, sbi->ll_md_exp,
2166                          ll_i2info(*inode)->lli_smd);
2167 out:
2168         md_free_lustre_md(sbi->ll_md_exp, &md);
2169         RETURN(rc);
2170 }
2171
2172 char *llap_origins[] = {
2173         [LLAP_ORIGIN_UNKNOWN] = "--",
2174         [LLAP_ORIGIN_READPAGE] = "rp",
2175         [LLAP_ORIGIN_READAHEAD] = "ra",
2176         [LLAP_ORIGIN_COMMIT_WRITE] = "cw",
2177         [LLAP_ORIGIN_WRITEPAGE] = "wp",
2178         [LLAP_ORIGIN_LOCKLESS_IO] = "ls"
2179 };
2180
2181 struct ll_async_page *llite_pglist_next_llap(struct ll_sb_info *sbi,
2182                                              struct list_head *list)
2183 {
2184         struct ll_async_page *llap;
2185         struct list_head *pos;
2186
2187         list_for_each(pos, list) {
2188                 if (pos == &sbi->ll_pglist)
2189                         return NULL;
2190                 llap = list_entry(pos, struct ll_async_page, llap_pglist_item);
2191                 if (llap->llap_page == NULL)
2192                         continue;
2193                 return llap;
2194         }
2195         LBUG();
2196         return NULL;
2197 }
2198
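/* Handle the IOC_OBD_STATFS ioctl: the payload selects either the MDC
 * (LL_STATFS_MDC) or the LOV (LL_STATFS_LOV) export, and the request is
 * forwarded to that OBD via obd_iocontrol(). */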
2199 int ll_obd_statfs(struct inode *inode, void *arg)
2200 {
2201         struct ll_sb_info *sbi = NULL;
2202         struct obd_export *exp;
2203         char *buf = NULL;
2204         struct obd_ioctl_data *data = NULL;
2205         __u32 type;
2206         int len = 0, rc;
2207
2208         if (!inode || !(sbi = ll_i2sbi(inode)))
2209                 GOTO(out_statfs, rc = -EINVAL);
2210
2211         rc = obd_ioctl_getdata(&buf, &len, arg);
2212         if (rc)
2213                 GOTO(out_statfs, rc);
2214
2215         data = (void*)buf;
2216         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2217             !data->ioc_pbuf1 || !data->ioc_pbuf2)
2218                 GOTO(out_statfs, rc = -EINVAL);
2219
2220         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2221         if (type == LL_STATFS_MDC)
2222                 exp = sbi->ll_md_exp;
2223         else if (type == LL_STATFS_LOV)
2224                 exp = sbi->ll_dt_exp;
2225         else
2226                 GOTO(out_statfs, rc = -ENODEV);
2227
2228         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2229         if (rc)
2230                 GOTO(out_statfs, rc);
2231 out_statfs:
2232         if (buf)
2233                 obd_ioctl_freedata(buf, len);
2234         return rc;
2235 }
2236
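/* Apply a configuration record (for example, a tunable set with
 * "lctl conf_param") to an already-mounted client.  The record's device
 * name carries the superblock address, e.g. "lustre-client-aacfe000"; the
 * hex suffix is parsed back into the super_block pointer below so the
 * parameter is applied to the correct mount. */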
2237 int ll_process_config(struct lustre_cfg *lcfg)
2238 {
2239         char *ptr;
2240         void *sb;
2241         struct lprocfs_static_vars lvars;
2242         unsigned long x;
2243         int rc = 0;
2244
2245         lprocfs_llite_init_vars(&lvars);
2246
2247         /* The instance name contains the sb: lustre-client-aacfe000 */
2248         ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2249         if (!ptr || !*(++ptr))
2250                 return -EINVAL;
2251         if (sscanf(ptr, "%lx", &x) != 1)
2252                 return -EINVAL;
2253         sb = (void *)x;
2254         /* This better be a real Lustre superblock! */
2255         LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2256
2257         /* Note we have not called client_common_fill_super yet, so
2258            proc functions must be able to handle that! */
2259         rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2260                                       lcfg, sb);
2261         return(rc);
2262 }
2263
2264 /* This function prepares an md_op_data hint for passing down to the MD stack. */
2265 struct md_op_data * ll_prep_md_op_data(struct md_op_data *op_data,
2266                                        struct inode *i1, struct inode *i2,
2267                                        const char *name, int namelen,
2268                                        int mode, __u32 opc, void *data)
2269 {
2270         LASSERT(i1 != NULL);
2271
2272         if (namelen > ll_i2sbi(i1)->ll_namelen)
2273                 return ERR_PTR(-ENAMETOOLONG);
2274
2275         if (op_data == NULL)
2276                 OBD_ALLOC_PTR(op_data);
2277
2278         if (op_data == NULL)
2279                 return ERR_PTR(-ENOMEM);
2280
2281         ll_i2gids(op_data->op_suppgids, i1, i2);
2282         op_data->op_fid1 = *ll_inode2fid(i1);
2283         op_data->op_capa1 = ll_mdscapa_get(i1);
2284
2285         if (i2) {
2286                 op_data->op_fid2 = *ll_inode2fid(i2);
2287                 op_data->op_capa2 = ll_mdscapa_get(i2);
2288         } else {
2289                 fid_zero(&op_data->op_fid2);
2290                 op_data->op_capa2 = NULL;
2291         }
2292
2293         op_data->op_name = name;
2294         op_data->op_namelen = namelen;
2295         op_data->op_mode = mode;
2296         op_data->op_mod_time = cfs_time_current_sec();
2297         op_data->op_fsuid = current->fsuid;
2298         op_data->op_fsgid = current->fsgid;
2299         op_data->op_cap = current->cap_effective;
2300         op_data->op_bias = MDS_CHECK_SPLIT;
2301         op_data->op_opc = opc;
2302         op_data->op_mds = 0;
2303         op_data->op_data = data;
2304
2305         return op_data;
2306 }
2307
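/* Typical caller pattern (illustrative; it mirrors the EXT3_IOC_SETFLAGS
 * path above):
 *
 *      op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *                                   LUSTRE_OPC_ANY, NULL);
 *      if (IS_ERR(op_data))
 *              RETURN(PTR_ERR(op_data));
 *      ... fill operation-specific fields and issue the MD request ...
 *      ll_finish_md_op_data(op_data);
 *
 * Passing op_data == NULL asks ll_prep_md_op_data() to allocate the
 * structure; ll_finish_md_op_data() drops the capabilities and frees it. */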
2308 void ll_finish_md_op_data(struct md_op_data *op_data)
2309 {
2310         capa_put(op_data->op_capa1);
2311         capa_put(op_data->op_capa2);
2312         OBD_FREE_PTR(op_data);
2313 }