1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see [sun.com URL with a
20  * copy of GPLv2].
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/llite/llite_lib.c
37  *
38  * Lustre Light Super operations
39  */
40
41 #define DEBUG_SUBSYSTEM S_LLITE
42
43 #include <linux/module.h>
44 #include <linux/types.h>
45 #include <linux/random.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48
49 #include <lustre_lite.h>
50 #include <lustre_ha.h>
51 #include <lustre_dlm.h>
52 #include <lprocfs_status.h>
53 #include <lustre_disk.h>
54 #include <lustre_param.h>
55 #include <lustre_log.h>
56 #include <obd_cksum.h>
57 #include <lustre_cache.h>
58 #include "llite_internal.h"
59
60 cfs_mem_cache_t *ll_file_data_slab;
61
62 LIST_HEAD(ll_super_blocks);
63 spinlock_t ll_sb_lock = SPIN_LOCK_UNLOCKED;
64
65 extern struct address_space_operations ll_aops;
66 extern struct address_space_operations ll_dir_aops;
67
68 #ifndef log2
69 #define log2(n) ffz(~(n))
70 #endif
71
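/* Free the per-CPU page list descriptors created by ll_pglist_init().
 * Each descriptor lives inside its own backing page, so freeing that page
 * releases the descriptor as well; the pointer array is freed afterwards. */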
72 static inline void ll_pglist_fini(struct ll_sb_info *sbi)
73 {
74         struct page *page;
75         int i;
76         
77         if (sbi->ll_pglist == NULL)
78                 return;
79
80         for_each_possible_cpu(i) {
81                 page = sbi->ll_pglist[i]->llpd_page;
82                 if (page) {
83                         sbi->ll_pglist[i] = NULL;
84                         __free_page(page);
85                 }
86         }
87
88         OBD_FREE(sbi->ll_pglist, sizeof(void *)*num_possible_cpus());
89         sbi->ll_pglist = NULL;
90 }
91
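/* Allocate one struct ll_pglist_data per possible CPU.  Each descriptor is
 * placed in a page allocated on its CPU's node, at an L1-cache-aligned
 * "color" offset to spread the descriptors across cache sets, and the async
 * page budget is divided evenly among the online CPUs. */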
92 static inline int ll_pglist_init(struct ll_sb_info *sbi)
93 {
94         struct ll_pglist_data *pd;
95         unsigned long budget;
96         int i, color = 0;
97         ENTRY;
98
99         OBD_ALLOC(sbi->ll_pglist, sizeof(void *) * num_possible_cpus());
100         if (sbi->ll_pglist == NULL)
101                 RETURN(-ENOMEM);
102
103         budget = sbi->ll_async_page_max / num_online_cpus();
104         for_each_possible_cpu(i) {
105                 struct page *page = alloc_pages_node(cpu_to_node(i),
106                                                     GFP_KERNEL, 0);
107                 if (page == NULL) {
108                         ll_pglist_fini(sbi);
109                         RETURN(-ENOMEM);
110                 }
111
112                 if (color + L1_CACHE_ALIGN(sizeof(*pd)) > PAGE_SIZE)
113                         color = 0;
114
115                 pd = (struct ll_pglist_data *)(page_address(page) + color);
116                 memset(pd, 0, sizeof(*pd));
117                 spin_lock_init(&pd->llpd_lock);
118                 INIT_LIST_HEAD(&pd->llpd_list);
119                 if (cpu_online(i))
120                         pd->llpd_budget = budget;
121                 pd->llpd_cpu = i;
122                 pd->llpd_page = page;
123                 atomic_set(&pd->llpd_sample_count, 0);
124                 sbi->ll_pglist[i] = pd;
125                 color += L1_CACHE_ALIGN(sizeof(*pd));
126         }
127
128         RETURN(0);
129 }
130
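/* Allocate and initialize the client superblock info: cap the async page
 * cache at 1/2 of low memory on machines with less than 512 MB and at 3/4
 * otherwise, set the read-ahead and statahead defaults, generate a UUID for
 * this mount and link the sbi onto the global ll_super_blocks list. */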
131 static struct ll_sb_info *ll_init_sbi(void)
132 {
133         struct ll_sb_info *sbi = NULL;
134         unsigned long pages;
135         struct sysinfo si;
136         class_uuid_t uuid;
137         int i;
138         ENTRY;
139
140         OBD_ALLOC(sbi, sizeof(*sbi));
141         if (!sbi)
142                 RETURN(NULL);
143
144         OBD_ALLOC(sbi->ll_async_page_sample, sizeof(long)*num_possible_cpus());
145         if (sbi->ll_async_page_sample == NULL)
146                 GOTO(out, 0);
147
148         spin_lock_init(&sbi->ll_lock);
149         spin_lock_init(&sbi->ll_lco.lco_lock);
150         spin_lock_init(&sbi->ll_pp_extent_lock);
151         spin_lock_init(&sbi->ll_process_lock);
152         sbi->ll_rw_stats_on = 0;
153
154         si_meminfo(&si);
155         pages = si.totalram - si.totalhigh;
156         if (pages >> (20 - CFS_PAGE_SHIFT) < 512)
157                 sbi->ll_async_page_max = pages / 2;
158         else
159                 sbi->ll_async_page_max = (pages / 4) * 3;
160
161         lcounter_init(&sbi->ll_async_page_count);
162         spin_lock_init(&sbi->ll_async_page_reblnc_lock);
163         sbi->ll_async_page_sample_max = 64 * num_online_cpus();
164         sbi->ll_async_page_reblnc_count = 0;
165         sbi->ll_async_page_clock_hand = 0;
166         if (ll_pglist_init(sbi))
167                 GOTO(out, 0);
168
169         sbi->ll_ra_info.ra_max_pages = min(pages / 32,
170                                            SBI_DEFAULT_READAHEAD_MAX);
171         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
172                                            SBI_DEFAULT_READAHEAD_WHOLE_MAX;
173         sbi->ll_contention_time = SBI_DEFAULT_CONTENTION_SECONDS;
174         sbi->ll_lockless_truncate_enable = SBI_DEFAULT_LOCKLESS_TRUNCATE_ENABLE;
175         INIT_LIST_HEAD(&sbi->ll_conn_chain);
176         INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
177
178         ll_generate_random_uuid(uuid);
179         class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
180         CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
181
182         spin_lock(&ll_sb_lock);
183         list_add_tail(&sbi->ll_list, &ll_super_blocks);
184         spin_unlock(&ll_sb_lock);
185
186 #ifdef ENABLE_LLITE_CHECKSUM
187         sbi->ll_flags |= LL_SBI_CHECKSUM;
188 #endif
189
190 #ifdef HAVE_LRU_RESIZE_SUPPORT
191         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
192 #endif
193
194 #ifdef HAVE_EXPORT___IGET
195         INIT_LIST_HEAD(&sbi->ll_deathrow);
196         spin_lock_init(&sbi->ll_deathrow_lock);
197 #endif
198         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
199                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_r_hist.oh_lock);
200                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_w_hist.oh_lock);
201         }
202
203         /* metadata statahead is enabled by default */
204         sbi->ll_sa_max = LL_SA_RPC_DEF;
205
206         RETURN(sbi);
207
208 out:
209         if (sbi->ll_async_page_sample)
210                 OBD_FREE(sbi->ll_async_page_sample, 
211                          sizeof(long) * num_possible_cpus());
212         ll_pglist_fini(sbi);
213         OBD_FREE(sbi, sizeof(*sbi));
214         RETURN(NULL);
215 }
216
217 void ll_free_sbi(struct super_block *sb)
218 {
219         struct ll_sb_info *sbi = ll_s2sbi(sb);
220         ENTRY;
221
222         if (sbi != NULL) {
223                 ll_pglist_fini(sbi);
224                 spin_lock(&ll_sb_lock);
225                 list_del(&sbi->ll_list);
226                 spin_unlock(&ll_sb_lock);
227                 lcounter_destroy(&sbi->ll_async_page_count);
228                 OBD_FREE(sbi->ll_async_page_sample, 
229                          sizeof(long) * num_possible_cpus());
230                 OBD_FREE(sbi, sizeof(*sbi));
231         }
232         EXIT;
233 }
234
235 static struct dentry_operations ll_d_root_ops = {
236 #ifdef DCACHE_LUSTRE_INVALID
237         .d_compare = ll_dcompare,
238 #endif
239 };
240
241 /* Initialize the default and maximum LOV EA and cookie sizes.  This allows
242  * us to make MDS RPCs with large enough reply buffers to hold the
243  * maximum-sized (= maximum striped) EA and cookie without having to
244  * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */
245 static int ll_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
246 {
247         struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC };
248         __u32 valsize = sizeof(struct lov_desc);
249         int rc, easize, def_easize, cookiesize;
250         struct lov_desc desc;
251         __u32 stripes;
252         ENTRY;
253
254         rc = obd_get_info(dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
255                           &valsize, &desc, NULL);
256         if (rc)
257                 RETURN(rc);
258
259         stripes = min(desc.ld_tgt_count, (__u32)LOV_MAX_STRIPE_COUNT);
260         lsm.lsm_stripe_count = stripes;
261         easize = obd_size_diskmd(dt_exp, &lsm);
262
263         lsm.lsm_stripe_count = desc.ld_default_stripe_count;
264         def_easize = obd_size_diskmd(dt_exp, &lsm);
265
266         cookiesize = stripes * sizeof(struct llog_cookie);
267
268         CDEBUG(D_HA, "updating max_mdsize/max_cookiesize: %d/%d\n",
269                easize, cookiesize);
270
271         rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize);
272         RETURN(rc);
273 }
274
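/* Connect this super block to its metadata (md) and data (dt) targets:
 * negotiate the connect flags with each export, register the page removal
 * and lock cancel callbacks, fetch the root FID from the MDS, instantiate
 * the root inode and install the Lustre super/export operations.  Partial
 * setup is unwound through the out_* labels on failure. */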
275 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
276 {
277         struct inode *root = 0;
278         struct ll_sb_info *sbi = ll_s2sbi(sb);
279         struct obd_device *obd;
280         struct lu_fid rootfid;
281         struct obd_capa *oc = NULL;
282         struct obd_statfs osfs;
283         struct ptlrpc_request *request = NULL;
284         struct lustre_handle dt_conn = {0, };
285         struct lustre_handle md_conn = {0, };
286         struct obd_connect_data *data = NULL;
287         struct lustre_md lmd;
288         obd_valid valid;
289         int size, err, checksum;
290         ENTRY;
291
292         obd = class_name2obd(md);
293         if (!obd) {
294                 CERROR("MD %s: not setup or attached\n", md);
295                 RETURN(-EINVAL);
296         }
297
298         OBD_ALLOC_PTR(data);
299         if (data == NULL)
300                 RETURN(-ENOMEM);
301
302         if (proc_lustre_fs_root) {
303                 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
304                                                   dt, md);
305                 if (err < 0)
306                         CERROR("could not register mount in /proc/fs/lustre\n");
307         }
308
309         /* indicate the features supported by this client */
310         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
311                                   OBD_CONNECT_JOIN     | OBD_CONNECT_ATTRFID  |
312                                   OBD_CONNECT_VERSION  | OBD_CONNECT_MDS_CAPA |
313                                   OBD_CONNECT_OSS_CAPA | OBD_CONNECT_CANCELSET|
314                                   OBD_CONNECT_FID      | OBD_CONNECT_AT;
315
316 #ifdef HAVE_LRU_RESIZE_SUPPORT
317         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
318                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
319 #endif
320 #ifdef CONFIG_FS_POSIX_ACL
321         data->ocd_connect_flags |= OBD_CONNECT_ACL;
322 #endif
323         data->ocd_ibits_known = MDS_INODELOCK_FULL;
324         data->ocd_version = LUSTRE_VERSION_CODE;
325
326         if (sb->s_flags & MS_RDONLY)
327                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
328         if (sbi->ll_flags & LL_SBI_USER_XATTR)
329                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
330
331 #ifdef HAVE_MS_FLOCK_LOCK
332         /* force vfs to use lustre handler for flock() calls - bug 10743 */
333         sb->s_flags |= MS_FLOCK_LOCK;
334 #endif
335         
336         if (sbi->ll_flags & LL_SBI_FLOCK)
337                 sbi->ll_fop = &ll_file_operations_flock;
338         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
339                 sbi->ll_fop = &ll_file_operations;
340         else
341                 sbi->ll_fop = &ll_file_operations_noflock;
342
343         /* real client */
344         data->ocd_connect_flags |= OBD_CONNECT_REAL;
345         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
346                 data->ocd_connect_flags &= ~OBD_CONNECT_LCL_CLIENT;
347                 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT;
348         } else {
349                 data->ocd_connect_flags &= ~OBD_CONNECT_RMT_CLIENT;
350                 data->ocd_connect_flags |= OBD_CONNECT_LCL_CLIENT;
351         }
352
353         err = obd_connect(NULL, &md_conn, obd, &sbi->ll_sb_uuid, data, NULL);
354         if (err == -EBUSY) {
355                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
356                                    "recovery, of which this client is not a "
357                                    "part. Please wait for recovery to complete,"
358                                    " abort, or time out.\n", md);
359                 GOTO(out, err);
360         } else if (err) {
361                 CERROR("cannot connect to %s: rc = %d\n", md, err);
362                 GOTO(out, err);
363         }
364         sbi->ll_md_exp = class_conn2export(&md_conn);
365
366         err = obd_fid_init(sbi->ll_md_exp);
367         if (err) {
368                 CERROR("Can't init metadata layer FID infrastructure, "
369                        "rc %d\n", err);
370                 GOTO(out_md, err);
371         }
372
373         err = obd_statfs(obd, &osfs, cfs_time_current_64() - HZ, 0);
374         if (err)
375                 GOTO(out_md_fid, err);
376
377         size = sizeof(*data);
378         err = obd_get_info(sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
379                            KEY_CONN_DATA,  &size, data, NULL);
380         if (err) {
381                 CERROR("Get connect data failed: %d \n", err);
382                 GOTO(out_md, err);
383         }
384
385         LASSERT(osfs.os_bsize);
386         sb->s_blocksize = osfs.os_bsize;
387         sb->s_blocksize_bits = log2(osfs.os_bsize);
388         sb->s_magic = LL_SUPER_MAGIC;
389
390         /* for bug 11559. in $LINUX/fs/read_write.c, function do_sendfile():
391          *         retval = in_file->f_op->sendfile(...);
392          *         if (*ppos > max)
393          *                 retval = -EOVERFLOW;
394          *
395          * it will check if *ppos is greater than max. However, max equals
396          * s_maxbytes, which is a negative integer on an x86_64 box since loff_t
397          * is defined as a signed long long integer in the Linux kernel. */
398 #if BITS_PER_LONG == 64
399         sb->s_maxbytes = PAGE_CACHE_MAXBYTES >> 1;
400 #else
401         sb->s_maxbytes = PAGE_CACHE_MAXBYTES;
402 #endif
403         sbi->ll_namelen = osfs.os_namelen;
404         sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
405
406         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
407             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
408                 LCONSOLE_INFO("Disabling user_xattr feature because "
409                               "it is not supported on the server\n");
410                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
411         }
412
413         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
414 #ifdef MS_POSIXACL
415                 sb->s_flags |= MS_POSIXACL;
416 #endif
417                 sbi->ll_flags |= LL_SBI_ACL;
418         } else {
419                 LCONSOLE_INFO("client wants to enable acl, but mdt does not!\n");
420 #ifdef MS_POSIXACL
421                 sb->s_flags &= ~MS_POSIXACL;
422 #endif
423                 sbi->ll_flags &= ~LL_SBI_ACL;
424         }
425
426         if (data->ocd_connect_flags & OBD_CONNECT_JOIN)
427                 sbi->ll_flags |= LL_SBI_JOIN;
428
429         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
430                 if (!(data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT)) {
431                         /* sometimes a local client claims to be remote, but the
432                          * MDT will disagree when client GSS is not applied. */
433                         LCONSOLE_INFO("client claims to be remote, but server "
434                                       "rejected, forced to be local.\n");
435                         sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
436                 }
437         } else {
438                 if (!(data->ocd_connect_flags & OBD_CONNECT_LCL_CLIENT)) {
439                         /* with GSS applied, a remote client cannot claim to be
440                          * local, so the MDT may force the client to be remote. */
441                         LCONSOLE_INFO("client claims to be local, but server "
442                                       "rejected, forced to be remote.\n");
443                         sbi->ll_flags |= LL_SBI_RMT_CLIENT;
444                 }
445         }
446
447         if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
448                 LCONSOLE_INFO("client enabled MDS capability!\n");
449                 sbi->ll_flags |= LL_SBI_MDS_CAPA;
450         }
451
452         if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
453                 LCONSOLE_INFO("client enabled OSS capability!\n");
454                 sbi->ll_flags |= LL_SBI_OSS_CAPA;
455         }
456
457         sbi->ll_sdev_orig = sb->s_dev;
458 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
459         /* We set sb->s_dev equal on all lustre clients in order to support
460          * NFS export clustering.  NFSD requires that the FSID be the same
461          * on all clients. */
462         /* s_dev is also used in lt_compare() to compare two fs, but that is
463          * only a node-local comparison. */
464
465         /* XXX: this will not work with LMV */
466         sb->s_dev = get_uuid2int(sbi2mdc(sbi)->cl_target_uuid.uuid,
467                                  strlen(sbi2mdc(sbi)->cl_target_uuid.uuid));
468 #endif
469
470         obd = class_name2obd(dt);
471         if (!obd) {
472                 CERROR("DT %s: not setup or attached\n", dt);
473                 GOTO(out_md_fid, err = -ENODEV);
474         }
475
476         data->ocd_connect_flags = OBD_CONNECT_GRANT     | OBD_CONNECT_VERSION  |
477                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
478                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID      |
479                                   OBD_CONNECT_SRVLOCK   | OBD_CONNECT_TRUNCLOCK|
480                                   OBD_CONNECT_AT;
481         if (sbi->ll_flags & LL_SBI_OSS_CAPA)
482                 data->ocd_connect_flags |= OBD_CONNECT_OSS_CAPA;
483
484         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
485                 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
486                  * disabled by default, because it can still be enabled on the
487                  * fly via /proc. As a consequence, we still need to come to an
488                  * agreement on the supported algorithms at connect time */
489                 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
490
491                 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
492                         data->ocd_cksum_types = OBD_CKSUM_ADLER;
493                 else
494                         /* send the list of supported checksum types */
495                         data->ocd_cksum_types = OBD_CKSUM_ALL;
496         }
497
498 #ifdef HAVE_LRU_RESIZE_SUPPORT
499         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
500 #endif
501         CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d "
502                "ocd_grant: %d\n", data->ocd_connect_flags,
503                data->ocd_version, data->ocd_grant);
504
505         obd->obd_upcall.onu_owner = &sbi->ll_lco;
506         obd->obd_upcall.onu_upcall = ll_ocd_update;
507         data->ocd_brw_size = PTLRPC_MAX_BRW_PAGES << CFS_PAGE_SHIFT;
508
509         err = obd_connect(NULL, &dt_conn, obd, &sbi->ll_sb_uuid, data, NULL);
510         if (err == -EBUSY) {
511                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
512                                    "recovery, of which this client is not a "
513                                    "part.  Please wait for recovery to "
514                                    "complete, abort, or time out.\n", dt);
515                 GOTO(out_md_fid, err);
516         } else if (err) {
517                 CERROR("Cannot connect to %s: rc = %d\n", dt, err);
518                 GOTO(out_md_fid, err);
519         }
520
521         sbi->ll_dt_exp = class_conn2export(&dt_conn);
522
523         err = obd_fid_init(sbi->ll_dt_exp);
524         if (err) {
525                 CERROR("Can't init data layer FID infrastructure, "
526                        "rc %d\n", err);
527                 GOTO(out_dt, err);
528         }
529         
530         spin_lock(&sbi->ll_lco.lco_lock);
531         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
532         spin_unlock(&sbi->ll_lco.lco_lock);
533
534         err = obd_register_page_removal_cb(sbi->ll_dt_exp,
535                                            ll_page_removal_cb, 
536                                            ll_pin_extent_cb);
537         if (err) {
538                 CERROR("cannot register page removal callback: rc = %d\n",err);
539                 GOTO(out_dt, err);
540         }
541         err = obd_register_lock_cancel_cb(sbi->ll_dt_exp,
542                                           ll_extent_lock_cancel_cb);
543         if (err) {
544                 CERROR("cannot register lock cancel callback: rc = %d\n", err);
545                 GOTO(out_page_rm_cb, err);
546         }
547
548         err = ll_init_ea_size(sbi->ll_md_exp, sbi->ll_dt_exp);
549         if (err) {
550                 CERROR("cannot set max EA and cookie sizes: rc = %d\n", err);
551                 GOTO(out_lock_cn_cb, err);
552         }
553
554         err = obd_prep_async_page(sbi->ll_dt_exp, NULL, NULL, NULL,
555                                   0, NULL, NULL, NULL, 0, NULL);
556         if (err < 0) {
557                 LCONSOLE_ERROR_MSG(0x151, "There are no OST's in this "
558                                    "filesystem. There must be at least one "
559                                    "active OST for a client to start.\n");
560                 GOTO(out_lock_cn_cb, err);
561         }
562
563         if (!ll_async_page_slab) {
564                 ll_async_page_slab_size =
565                         size_round(sizeof(struct ll_async_page)) + err;
566                 ll_async_page_slab = cfs_mem_cache_create("ll_async_page",
567                                                           ll_async_page_slab_size,
568                                                           0, 0);
569                 if (!ll_async_page_slab)
570                         GOTO(out_lock_cn_cb, err = -ENOMEM);
571         }
572
573         err = md_getstatus(sbi->ll_md_exp, &rootfid, &oc);
574         if (err) {
575                 CERROR("cannot mds_connect: rc = %d\n", err);
576                 GOTO(out_lock_cn_cb, err);
577         }
578         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&rootfid));
579         sbi->ll_root_fid = rootfid;
580
581         sb->s_op = &lustre_super_operations;
582         sb->s_export_op = &lustre_export_operations;
583
584         /* make root inode
585          * XXX: move this to after cbd setup? */
586         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
587         if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
588                 valid |= OBD_MD_FLRMTPERM;
589         else if (sbi->ll_flags & LL_SBI_ACL)
590                 valid |= OBD_MD_FLACL;
591
592         err = md_getattr(sbi->ll_md_exp, &rootfid, oc, valid, 0, &request);
593         if (oc)
594                 free_capa(oc);
595         if (err) {
596                 CERROR("md_getattr failed for root: rc = %d\n", err);
597                 GOTO(out_lock_cn_cb, err);
598         }
599         memset(&lmd, 0, sizeof(lmd));
600         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
601                                sbi->ll_md_exp, &lmd);
602         if (err) {
603                 CERROR("failed to understand root inode md: rc = %d\n", err);
604                 ptlrpc_req_finished (request);
605                 GOTO(out_lock_cn_cb, err);
606         }
607
608         LASSERT(fid_is_sane(&sbi->ll_root_fid));
609         root = ll_iget(sb, ll_fid_build_ino(sbi, &sbi->ll_root_fid), &lmd);
610         md_free_lustre_md(sbi->ll_md_exp, &lmd);
611         ptlrpc_req_finished(request);
612
613         if (root == NULL || is_bad_inode(root)) {
614                 if (lmd.lsm)
615                         obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
616 #ifdef CONFIG_FS_POSIX_ACL
617                 if (lmd.posix_acl) {
618                         posix_acl_release(lmd.posix_acl);
619                         lmd.posix_acl = NULL;
620                 }
621 #endif
622                 CERROR("lustre_lite: bad iget4 for root\n");
623                 GOTO(out_root, err = -EBADF);
624         }
625
626         err = ll_close_thread_start(&sbi->ll_lcq);
627         if (err) {
628                 CERROR("cannot start close thread: rc %d\n", err);
629                 GOTO(out_root, err);
630         }
631
632 #ifdef CONFIG_FS_POSIX_ACL
633         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
634                 rct_init(&sbi->ll_rct);
635                 et_init(&sbi->ll_et);
636         }
637 #endif
638
639         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
640         err = obd_set_info_async(sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
641                                  KEY_CHECKSUM, sizeof(checksum), &checksum,
642                                  NULL);
643
644         sb->s_root = d_alloc_root(root);
645         if (data != NULL)
646                 OBD_FREE(data, sizeof(*data));
647         sb->s_root->d_op = &ll_d_root_ops;
648         RETURN(err);
649 out_root:
650         if (root)
651                 iput(root);
652 out_lock_cn_cb:
653         obd_unregister_lock_cancel_cb(sbi->ll_dt_exp,
654                                       ll_extent_lock_cancel_cb);
655 out_page_rm_cb:
656         obd_unregister_page_removal_cb(sbi->ll_dt_exp,
657                                        ll_page_removal_cb);
658         obd_fid_fini(sbi->ll_dt_exp);
659 out_dt:
660         obd_disconnect(sbi->ll_dt_exp);
661         sbi->ll_dt_exp = NULL;
662 out_md_fid:
663         obd_fid_fini(sbi->ll_md_exp);
664 out_md:
665         obd_disconnect(sbi->ll_md_exp);
666         sbi->ll_md_exp = NULL;
667 out:
668         if (data != NULL)
669                 OBD_FREE_PTR(data);
670         lprocfs_unregister_mountpoint(sbi);
671         return err;
672 }
673
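/* Fetch the maximum EA size known to the MDT (KEY_MAX_EASIZE) into *lmmsize,
 * after priming it with the local LOV's maximum on-disk MD size. */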
674 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
675 {
676         int size, rc;
677
678         *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
679         size = sizeof(int);
680         rc = obd_get_info(sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
681                           KEY_MAX_EASIZE, &size, lmmsize, NULL);
682         if (rc)
683                 CERROR("Get max mdsize error rc %d \n", rc);
684
685         RETURN(rc);
686 }
687
688 void ll_dump_inode(struct inode *inode)
689 {
690         struct list_head *tmp;
691         int dentry_count = 0;
692
693         LASSERT(inode != NULL);
694
695         list_for_each(tmp, &inode->i_dentry)
696                 dentry_count++;
697
698         CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
699                inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
700                inode->i_mode, atomic_read(&inode->i_count), dentry_count);
701 }
702
703 void lustre_dump_dentry(struct dentry *dentry, int recur)
704 {
705         struct list_head *tmp;
706         int subdirs = 0;
707
708         LASSERT(dentry != NULL);
709
710         list_for_each(tmp, &dentry->d_subdirs)
711                 subdirs++;
712
713         CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
714                " flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
715                dentry->d_name.len, dentry->d_name.name,
716                dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
717                dentry->d_parent, dentry->d_inode, atomic_read(&dentry->d_count),
718                dentry->d_flags, dentry->d_fsdata, subdirs);
719         if (dentry->d_inode != NULL)
720                 ll_dump_inode(dentry->d_inode);
721
722         if (recur == 0)
723                 return;
724
725         list_for_each(tmp, &dentry->d_subdirs) {
726                 struct dentry *d = list_entry(tmp, struct dentry, d_child);
727                 lustre_dump_dentry(d, recur - 1);
728         }
729 }
730
731 #ifdef HAVE_EXPORT___IGET
732 static void prune_dir_dentries(struct inode *inode)
733 {
734         struct dentry *dentry, *prev = NULL;
735
736         /* due to Lustre-specific logic, a directory can have
737          * several dentries - a bug from the VFS point of view */
738 restart:
739         spin_lock(&dcache_lock);
740         if (!list_empty(&inode->i_dentry)) {
741                 dentry = list_entry(inode->i_dentry.prev,
742                                     struct dentry, d_alias);
743                 /* in order to prevent infinite loops we
744                  * break if previous dentry is busy */
745                 if (dentry != prev) {
746                         prev = dentry;
747                         dget_locked(dentry);
748                         spin_unlock(&dcache_lock);
749
750                         /* try to kill all child dentries */
751                         lock_dentry(dentry);
752                         shrink_dcache_parent(dentry);
753                         unlock_dentry(dentry);
754                         dput(dentry);
755
756                         /* now try to get rid of current dentry */
757                         d_prune_aliases(inode);
758                         goto restart;
759                 }
760         }
761         spin_unlock(&dcache_lock);
762 }
763
764 static void prune_deathrow_one(struct ll_inode_info *lli)
765 {
766         struct inode *inode = ll_info2i(lli);
767
768         /* first, try to drop any dentries - they hold a ref on the inode */
769         if (S_ISDIR(inode->i_mode))
770                 prune_dir_dentries(inode);
771         else
772                 d_prune_aliases(inode);
773
774
775         /* if somebody still uses it, leave it */
776         LASSERT(atomic_read(&inode->i_count) > 0);
777         if (atomic_read(&inode->i_count) > 1)
778                 goto out;
779
780         CDEBUG(D_INODE, "inode %lu/%u(%d) looks a good candidate for prune\n",
781                inode->i_ino,inode->i_generation, atomic_read(&inode->i_count));
782
783         /* seems nobody uses it anymore */
784         inode->i_nlink = 0;
785
786 out:
787         iput(inode);
788         return;
789 }
790
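/* Drain sbi->ll_deathrow, pruning one inode at a time.  With 'try' set the
 * walk is best-effort: it stops if the deathrow lock is contended or a
 * reschedule is pending. */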
791 static void prune_deathrow(struct ll_sb_info *sbi, int try)
792 {
793         struct ll_inode_info *lli;
794         int empty;
795
796         do {
797                 if (need_resched() && try)
798                         break;
799
800                 if (try) {
801                         if (!spin_trylock(&sbi->ll_deathrow_lock))
802                                 break;
803                 } else {
804                         spin_lock(&sbi->ll_deathrow_lock);
805                 }
806
807                 empty = 1;
808                 lli = NULL;
809                 if (!list_empty(&sbi->ll_deathrow)) {
810                         lli = list_entry(sbi->ll_deathrow.next,
811                                          struct ll_inode_info,
812                                          lli_dead_list);
813                         list_del_init(&lli->lli_dead_list);
814                         if (!list_empty(&sbi->ll_deathrow))
815                                 empty = 0;
816                 }
817                 spin_unlock(&sbi->ll_deathrow_lock);
818
819                 if (lli)
820                         prune_deathrow_one(lli);
821
822         } while (empty == 0);
823 }
824 #else /* !HAVE_EXPORT___IGET */
825 #define prune_deathrow(sbi, try) do {} while (0)
826 #endif /* HAVE_EXPORT___IGET */
827
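/* Undo client_common_fill_super(): cancel unused locks, stop the close
 * thread, drain the inode deathrow, disconnect the data and metadata
 * exports and remove the /proc entries for this mount. */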
828 void client_common_put_super(struct super_block *sb)
829 {
830         struct ll_sb_info *sbi = ll_s2sbi(sb);
831         ENTRY;
832
833 #ifdef CONFIG_FS_POSIX_ACL
834         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
835                 et_fini(&sbi->ll_et);
836                 rct_fini(&sbi->ll_rct);
837         }
838 #endif
839
840         obd_cancel_unused(sbi->ll_dt_exp, NULL, 0, NULL);
841
842         ll_close_thread_shutdown(sbi->ll_lcq);
843
844         /* destroy inodes in deathrow */
845         prune_deathrow(sbi, 0);
846
847         list_del(&sbi->ll_conn_chain);
848
849         obd_fid_fini(sbi->ll_dt_exp);
850         obd_disconnect(sbi->ll_dt_exp);
851         sbi->ll_dt_exp = NULL;
852
853         lprocfs_unregister_mountpoint(sbi);
854
855         obd_fid_fini(sbi->ll_md_exp);
856         obd_disconnect(sbi->ll_md_exp);
857         sbi->ll_md_exp = NULL;
858
859         EXIT;
860 }
861
862 void ll_kill_super(struct super_block *sb)
863 {
864         struct ll_sb_info *sbi;
865
866         ENTRY;
867
868         /* sb not initialized? */
869         if (!(sb->s_flags & MS_ACTIVE))
870                 return;
871
872         sbi = ll_s2sbi(sb);
873         /* we need to restore s_dev, which was changed for clustered NFS, before
874          * put_super, because newer kernels cache s_dev and changing sb->s_dev in
875          * put_super does not affect the real device removal */
876         if (sbi)
877                 sb->s_dev = sbi->ll_sdev_orig;
878         EXIT;
879 }
880
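/* Return a newly allocated copy of the value part of an "opt=value" mount
 * string, or NULL if 'data' does not start with 'opt' or lacks an '='. */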
881 char *ll_read_opt(const char *opt, char *data)
882 {
883         char *value;
884         char *retval;
885         ENTRY;
886
887         CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
888         if (strncmp(opt, data, strlen(opt)))
889                 RETURN(NULL);
890         if ((value = strchr(data, '=')) == NULL)
891                 RETURN(NULL);
892
893         value++;
894         OBD_ALLOC(retval, strlen(value) + 1);
895         if (!retval) {
896                 CERROR("out of memory!\n");
897                 RETURN(NULL);
898         }
899
900         memcpy(retval, value, strlen(value)+1);
901         CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
902         RETURN(retval);
903 }
904
905 static inline int ll_set_opt(const char *opt, char *data, int fl)
906 {
907         if (strncmp(opt, data, strlen(opt)) != 0)
908                 return(0);
909         else
910                 return(fl);
911 }
912
913 /* non-client-specific mount options are parsed in lmd_parse */
914 static int ll_options(char *options, int *flags)
915 {
916         int tmp;
917         char *s1 = options, *s2;
918         ENTRY;
919
920         if (!options) 
921                 RETURN(0);
922
923         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
924
925         while (*s1) {
926                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
927                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
928                 if (tmp) {
929                         *flags |= tmp;
930                         goto next;
931                 }
932                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
933                 if (tmp) {
934                         *flags |= tmp;
935                         goto next;
936                 }
937                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
938                 if (tmp) {
939                         *flags |= tmp;
940                         goto next;
941                 }
942                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
943                 if (tmp) {
944                         *flags &= ~tmp;
945                         goto next;
946                 }
947                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
948                 if (tmp) {
949                         *flags |= tmp;
950                         goto next;
951                 }
952                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
953                 if (tmp) {
954                         *flags &= ~tmp;
955                         goto next;
956                 }
957                 tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
958                 if (tmp) {
959                         /* Ignore deprecated mount option.  The client will
960                          * always try to mount with ACL support; whether it is
961                          * used depends on whether the server supports it. */
962                         goto next;
963                 }
964                 tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
965                 if (tmp) {
966                         goto next;
967                 }
968                 tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
969                 if (tmp) {
970                         *flags |= tmp;
971                         goto next;
972                 }
973
974                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
975                 if (tmp) {
976                         *flags |= tmp;
977                         goto next;
978                 }
979                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
980                 if (tmp) {
981                         *flags &= ~tmp;
982                         goto next;
983                 }
984                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
985                 if (tmp) {
986                         *flags |= tmp;
987                         goto next;
988                 }
989                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
990                 if (tmp) {
991                         *flags &= ~tmp;
992                         goto next;
993                 }
994
995                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
996                                    s1);
997                 RETURN(-EINVAL);
998
999 next:
1000                 /* Find next opt */
1001                 s2 = strchr(s1, ',');
1002                 if (s2 == NULL)
1003                         break;
1004                 s1 = s2 + 1;
1005         }
1006         RETURN(0);
1007 }
1008
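/* Bring a Lustre inode info structure to a clean initial state: magic,
 * semaphores, spinlock, list heads and the open file handle bookkeeping. */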
1009 void ll_lli_init(struct ll_inode_info *lli)
1010 {
1011         lli->lli_inode_magic = LLI_INODE_MAGIC;
1012         sema_init(&lli->lli_size_sem, 1);
1013         sema_init(&lli->lli_write_sem, 1);
1014         lli->lli_flags = 0;
1015         lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
1016         spin_lock_init(&lli->lli_lock);
1017         INIT_LIST_HEAD(&lli->lli_pending_write_llaps);
1018         INIT_LIST_HEAD(&lli->lli_close_list);
1019         lli->lli_inode_magic = LLI_INODE_MAGIC;
1020         sema_init(&lli->lli_och_sem, 1);
1021         lli->lli_mds_read_och = lli->lli_mds_write_och = NULL;
1022         lli->lli_mds_exec_och = NULL;
1023         lli->lli_open_fd_read_count = lli->lli_open_fd_write_count = 0;
1024         lli->lli_open_fd_exec_count = 0;
1025         INIT_LIST_HEAD(&lli->lli_dead_list);
1026         lli->lli_remote_perms = NULL;
1027         lli->lli_rmtperm_utime = 0;
1028         sema_init(&lli->lli_rmtperm_sem, 1);
1029         INIT_LIST_HEAD(&lli->lli_oss_capas);
1030 }
1031
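/* Client mount entry point: parse the client-side mount options, process the
 * configuration log for this profile, build the per-mount "<target>-<instance>"
 * names of the metadata and data devices, and hand off to
 * client_common_fill_super() to establish the connections. */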
1032 int ll_fill_super(struct super_block *sb)
1033 {
1034         struct lustre_profile *lprof;
1035         struct lustre_sb_info *lsi = s2lsi(sb);
1036         struct ll_sb_info *sbi;
1037         char  *dt = NULL, *md = NULL;
1038         char  *profilenm = get_profile_name(sb);
1039         struct config_llog_instance cfg = {0, };
1040         char   ll_instance[sizeof(sb) * 2 + 1];
1041         int    err;
1042         ENTRY;
1043
1044         CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
1045
1046         cfs_module_get();
1047
1048         /* client additional sb info */
1049         lsi->lsi_llsbi = sbi = ll_init_sbi();
1050         if (!sbi) {
1051                 cfs_module_put();
1052                 RETURN(-ENOMEM);
1053         }
1054
1055         err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
1056         if (err) 
1057                 GOTO(out_free, err);
1058
1059         /* Generate a string unique to this super, in case some joker tries
1060            to mount the same fs at two mount points.
1061            Use the address of the super itself. */
1062         sprintf(ll_instance, "%p", sb);
1063         cfg.cfg_instance = ll_instance;
1064         cfg.cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1065
1066         /* set up client obds */
1067         err = lustre_process_log(sb, profilenm, &cfg);
1068         if (err < 0) {
1069                 CERROR("Unable to process log: %d\n", err);
1070                 GOTO(out_free, err);
1071         }
1072
1073         lprof = class_get_profile(profilenm);
1074         if (lprof == NULL) {
1075                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1076                                    " read from the MGS.  Does that filesystem "
1077                                    "exist?\n", profilenm);
1078                 GOTO(out_free, err = -EINVAL);
1079         }
1080         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1081                lprof->lp_md, lprof->lp_dt);
1082
1083         OBD_ALLOC(dt, strlen(lprof->lp_dt) +
1084                   strlen(ll_instance) + 2);
1085         if (!dt)
1086                 GOTO(out_free, err = -ENOMEM);
1087         sprintf(dt, "%s-%s", lprof->lp_dt, ll_instance);
1088
1089         OBD_ALLOC(md, strlen(lprof->lp_md) +
1090                   strlen(ll_instance) + 2);
1091         if (!md)
1092                 GOTO(out_free, err = -ENOMEM);
1093         sprintf(md, "%s-%s", lprof->lp_md, ll_instance);
1094
1095         /* connections, registrations, sb setup */
1096         err = client_common_fill_super(sb, md, dt);
1097
1098 out_free:
1099         if (md)
1100                 OBD_FREE(md, strlen(md) + 1);
1101         if (dt)
1102                 OBD_FREE(dt, strlen(dt) + 1);
1103         if (err) 
1104                 ll_put_super(sb);
1105         else
1106                 LCONSOLE_WARN("Client %s has started\n", profilenm);        
1107
1108         RETURN(err);
1109 } /* ll_fill_super */
1110
1111
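/* Client unmount: stop watching the config log, propagate obd_force to every
 * device in this super's group, tear down the common client state (only if
 * client_common_fill_super() succeeded), clean up the obd devices and the
 * profile, and free the sbi. */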
1112 void ll_put_super(struct super_block *sb)
1113 {
1114         struct config_llog_instance cfg;
1115         char   ll_instance[sizeof(sb) * 2 + 1];
1116         struct obd_device *obd;
1117         struct lustre_sb_info *lsi = s2lsi(sb);
1118         struct ll_sb_info *sbi = ll_s2sbi(sb);
1119         char *profilenm = get_profile_name(sb);
1120         int force = 1, next;
1121         ENTRY;
1122
1123         CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
1124
1125         ll_print_capa_stat(sbi);
1126
1127         sprintf(ll_instance, "%p", sb);
1128         cfg.cfg_instance = ll_instance;
1129         lustre_end_log(sb, NULL, &cfg);
1130         
1131         if (sbi->ll_md_exp) {
1132                 obd = class_exp2obd(sbi->ll_md_exp);
1133                 if (obd) 
1134                         force = obd->obd_force;
1135         }
1136         
1137         /* We need to set force before the lov_disconnect in 
1138            lustre_common_put_super, since l_d cleans up osc's as well. */
1139         if (force) {
1140                 next = 0;
1141                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1142                                                      &next)) != NULL) {
1143                         obd->obd_force = force;
1144                 }
1145         }                       
1146
1147         if (sbi->ll_lcq) {
1148                 /* Only if client_common_fill_super succeeded */
1149                 client_common_put_super(sb);
1150         }
1151         next = 0;
1152         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) !=NULL) {
1153                 class_manual_cleanup(obd);
1154         }
1155
1156         if (profilenm)
1157                 class_del_profile(profilenm);
1158
1159         ll_free_sbi(sb);
1160         lsi->lsi_llsbi = NULL;
1161
1162         lustre_common_put_super(sb);
1163
1164         LCONSOLE_WARN("client %s umount complete\n", ll_instance);
1165         
1166         cfs_module_put();
1167
1168         EXIT;
1169 } /* client_put_super */
1170
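/* When the kernel provides a cache-shrinker interface, register the llap
 * cache so that memory pressure can reclaim Lustre async pages from every
 * mounted client. */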
1171 #if defined(HAVE_REGISTER_CACHE) || defined(HAVE_SHRINKER_CACHE)
1172
1173 #if defined(HAVE_CACHE_RETURN_INT)
1174 static int
1175 #else
1176 static void
1177 #endif
1178 ll_shrink_cache(int priority, unsigned int gfp_mask)
1179 {
1180         struct ll_sb_info *sbi;
1181         int count = 0;
1182
1183         list_for_each_entry(sbi, &ll_super_blocks, ll_list)
1184                 count += llap_shrink_cache(sbi, priority);
1185
1186 #if defined(HAVE_CACHE_RETURN_INT)
1187         return count;
1188 #endif
1189 }
1190
1191 struct cache_definition ll_cache_definition = {
1192         .name = "llap_cache",
1193         .shrink = ll_shrink_cache
1194 };
1195 #endif /* HAVE_REGISTER_CACHE || HAVE_SHRINKER_CACHE */
1196
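/* Return the inode attached to a lock's l_ast_data with an extra reference
 * (igrab), or NULL if no inode is attached or the magic check shows the
 * pointer is stale. */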
1197 struct inode *ll_inode_from_lock(struct ldlm_lock *lock)
1198 {
1199         struct inode *inode = NULL;
1200         /* NOTE: we depend on atomic igrab() -bzzz */
1201         lock_res_and_lock(lock);
1202         if (lock->l_ast_data) {
1203                 struct ll_inode_info *lli = ll_i2info(lock->l_ast_data);
1204                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1205                         inode = igrab(lock->l_ast_data);
1206                 } else {
1207                         inode = lock->l_ast_data;
1208                         ldlm_lock_debug(NULL, inode->i_state & I_FREEING ?
1209                                                 D_INFO : D_WARNING,
1210                                         lock, __FILE__, __func__, __LINE__,
1211                                         "l_ast_data %p is bogus: magic %08x",
1212                                         lock->l_ast_data, lli->lli_inode_magic);
1213                         inode = NULL;
1214                 }
1215         }
1216         unlock_res_and_lock(lock);
1217         return inode;
1218 }
1219
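/* ldlm iterator callback: clear l_ast_data on every lock that still points
 * at the inode being cleared. */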
1220 static int null_if_equal(struct ldlm_lock *lock, void *data)
1221 {
1222         if (data == lock->l_ast_data) {
1223                 lock->l_ast_data = NULL;
1224
1225                 if (lock->l_req_mode != lock->l_granted_mode)
1226                         LDLM_ERROR(lock,"clearing inode with ungranted lock");
1227         }
1228
1229         return LDLM_ITER_CONTINUE;
1230 }
1231
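/* Called when the VFS clears an inode: detach it from outstanding DLM locks,
 * close any cached MDS open handles, and free the stripe MD, symlink name,
 * remote permissions / POSIX ACLs and capabilities before marking the lli
 * dead. */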
1232 void ll_clear_inode(struct inode *inode)
1233 {
1234         struct ll_inode_info *lli = ll_i2info(inode);
1235         struct ll_sb_info *sbi = ll_i2sbi(inode);
1236         ENTRY;
1237
1238         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1239                inode->i_generation, inode);
1240
1241         if (S_ISDIR(inode->i_mode)) {
1242                 /* these should have been cleared in ll_file_release */
1243                 LASSERT(lli->lli_sai == NULL);
1244                 LASSERT(lli->lli_opendir_key == NULL);
1245                 LASSERT(lli->lli_opendir_pid == 0);
1246         }
1247
1248         ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1249         md_change_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
1250                          null_if_equal, inode);
1251
1252         LASSERT(!lli->lli_open_fd_write_count);
1253         LASSERT(!lli->lli_open_fd_read_count);
1254         LASSERT(!lli->lli_open_fd_exec_count);
1255
1256         if (lli->lli_mds_write_och)
1257                 ll_md_real_close(inode, FMODE_WRITE);
1258         if (lli->lli_mds_exec_och)
1259                 ll_md_real_close(inode, FMODE_EXEC);
1260         if (lli->lli_mds_read_och)
1261                 ll_md_real_close(inode, FMODE_READ);
1262
1263         if (lli->lli_smd) {
1264                 obd_change_cbdata(sbi->ll_dt_exp, lli->lli_smd,
1265                                   null_if_equal, inode);
1266
1267                 obd_free_memmd(sbi->ll_dt_exp, &lli->lli_smd);
1268                 lli->lli_smd = NULL;
1269         }
1270
1271         if (lli->lli_symlink_name) {
1272                 OBD_FREE(lli->lli_symlink_name,
1273                          strlen(lli->lli_symlink_name) + 1);
1274                 lli->lli_symlink_name = NULL;
1275         }
1276
1277         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1278                 LASSERT(lli->lli_posix_acl == NULL);
1279                 if (lli->lli_remote_perms) {
1280                         free_rmtperm_hash(lli->lli_remote_perms);
1281                         lli->lli_remote_perms = NULL;
1282                 }
1283         }
1284 #ifdef CONFIG_FS_POSIX_ACL
1285         else if (lli->lli_posix_acl) {
1286                 LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
1287                 LASSERT(lli->lli_remote_perms == NULL);
1288                 posix_acl_release(lli->lli_posix_acl);
1289                 lli->lli_posix_acl = NULL;
1290         }
1291 #endif
1292         lli->lli_inode_magic = LLI_INODE_DEAD;
1293
1294 #ifdef HAVE_EXPORT___IGET
1295         spin_lock(&sbi->ll_deathrow_lock);
1296         list_del_init(&lli->lli_dead_list);
1297         spin_unlock(&sbi->ll_deathrow_lock);
1298 #endif
1299         ll_clear_inode_capas(inode);
1300
1301         EXIT;
1302 }
1303
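/* Send a setattr RPC to the MDS and fold the reply back into the local
 * inode: timestamps are applied through inode_setattr() and any IO epoch
 * data returned by the MDS is copied into op_data for the caller. */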
1304 int ll_md_setattr(struct inode *inode, struct md_op_data *op_data,
1305                   struct md_open_data **mod)
1306 {
1307         struct lustre_md md;
1308         struct ll_sb_info *sbi = ll_i2sbi(inode);
1309         struct ptlrpc_request *request = NULL;
1310         int rc;
1311         ENTRY;
1312         
1313         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0, 
1314                                      LUSTRE_OPC_ANY, NULL);
1315         if (IS_ERR(op_data))
1316                 RETURN(PTR_ERR(op_data));
1317
1318         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0, 
1319                         &request, mod);
1320         if (rc) {
1321                 ptlrpc_req_finished(request);
1322                 if (rc == -ENOENT) {
1323                         inode->i_nlink = 0;
1324                         /* Unlinked special device node? Or just a race?
1325                          * Pretend we have done everything. */
1326                         if (!S_ISREG(inode->i_mode) &&
1327                             !S_ISDIR(inode->i_mode))
1328                                 rc = inode_setattr(inode, &op_data->op_attr);
1329                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1330                         CERROR("md_setattr fails: rc = %d\n", rc);
1331                 }
1332                 RETURN(rc);
1333         }
1334
1335         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1336                               sbi->ll_md_exp, &md);
1337         if (rc) {
1338                 ptlrpc_req_finished(request);
1339                 RETURN(rc);
1340         }
1341
1342         /* We call inode_setattr to adjust timestamps.
1343          * If there is at least some data in file, we cleared ATTR_SIZE
1344          * above to avoid invoking vmtruncate, otherwise it is important
1345          * to call vmtruncate in inode_setattr to update inode->i_size
1346          * (bug 6196) */
1347         rc = inode_setattr(inode, &op_data->op_attr);
1348
1349         /* Extract epoch data if obtained. */
1350         op_data->op_handle = md.body->handle;
1351         op_data->op_ioepoch = md.body->ioepoch;
1352
1353         ll_update_inode(inode, &md);
1354         ptlrpc_req_finished(request);
1355
1356         RETURN(rc);
1357 }
1358
1359 /* Close IO epoch and send Size-on-MDS attribute update. */
1360 static int ll_setattr_done_writing(struct inode *inode,
1361                                    struct md_op_data *op_data,
1362                                    struct md_open_data *mod)
1363 {
1364         struct ll_inode_info *lli = ll_i2info(inode);
1365         int rc = 0;
1366         ENTRY;
1367         
1368         LASSERT(op_data != NULL);
1369         if (!S_ISREG(inode->i_mode))
1370                 RETURN(0);
1371
1372         CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID" for truncate\n",
1373                op_data->op_ioepoch, PFID(&lli->lli_fid));
1374
1375         op_data->op_flags = MF_EPOCH_CLOSE | MF_SOM_CHANGE;
1376         rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1377         if (rc == -EAGAIN) {
1378                 /* MDS has instructed us to obtain the Size-on-MDS attribute
1379                  * from the OSTs and send a setattr back to the MDS. */
1380                 rc = ll_sizeonmds_update(inode, mod, &op_data->op_handle,
1381                                          op_data->op_ioepoch);
1382         } else if (rc) {
1383                 CERROR("inode %lu mdc truncate failed: rc = %d\n",
1384                        inode->i_ino, rc);
1385         }
1386         RETURN(rc);
1387 }
1388
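/* Truncate the file to new_size under an extent lock.  When the server
 * supports lockless truncate (OBD_CONNECT_TRUNCLOCK) and it is enabled
 * locally, try to match an existing PW lock or punch on the server directly;
 * otherwise take a PW extent lock over [new_size, EOF] before vmtruncate(). */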
1389 static int ll_setattr_do_truncate(struct inode *inode, loff_t new_size)
1390 {
1391         struct ll_sb_info *sbi = ll_i2sbi(inode);
1392         struct ll_inode_info *lli = ll_i2info(inode);
1393         struct lov_stripe_md *lsm = lli->lli_smd;
1394         int rc;
1395         ldlm_policy_data_t policy = { .l_extent = {new_size,
1396                                                    OBD_OBJECT_EOF } };
1397         struct lustre_handle lockh = { 0 };
1398         int local_lock = 0; /* 0 - no local lock;
1399                              * 1 - lock taken by lock_extent;
1400                              * 2 - by obd_match*/
1401         int ast_flags;
1402         int err;
1403         ENTRY;
1404
1405         UNLOCK_INODE_MUTEX(inode);
1406         UP_WRITE_I_ALLOC_SEM(inode);
1407
1408         if (sbi->ll_lockless_truncate_enable &&
1409             (sbi->ll_lco.lco_flags & OBD_CONNECT_TRUNCLOCK)) {
1410                 ast_flags = LDLM_FL_BLOCK_GRANTED;
1411                 rc = obd_match(sbi->ll_dt_exp, lsm, LDLM_EXTENT,
1412                                &policy, LCK_PW, &ast_flags, inode, &lockh);
1413                 if (rc > 0) {
1414                         local_lock = 2;
1415                         rc = 0;
1416                 } else if (rc == 0) {
1417                         rc = ll_file_punch(inode, new_size, 1);
1418                 }
1419         } else {
1420                 /* XXX when we fix the AST intents to pass the discard-range
1421                  * XXX extent, make ast_flags always LDLM_AST_DISCARD_DATA
1422                  * XXX here. */
1423                 ast_flags = (new_size == 0) ? LDLM_AST_DISCARD_DATA : 0;
1424                 rc = ll_extent_lock(NULL, inode, lsm, LCK_PW, &policy,
1425                                     &lockh, ast_flags);
1426                 if (likely(rc == 0))
1427                         local_lock = 1;
1428         }
1429
1430         LOCK_INODE_MUTEX(inode);
1431         DOWN_WRITE_I_ALLOC_SEM(inode);
1432
1433         if (likely(rc == 0)) {
1434                 /* Only ll_inode_size_lock is taken at this level.
1435                  * lov_stripe_lock() is grabbed by ll_truncate() only over
1436                  * call to obd_adjust_kms().  If vmtruncate returns 0, then
1437                  * ll_truncate dropped ll_inode_size_lock() */
1438                 ll_inode_size_lock(inode, 0);
1439                 if (!local_lock) {
1440                         spin_lock(&lli->lli_lock);
1441                         lli->lli_flags |= LLIF_SRVLOCK;
1442                         spin_unlock(&lli->lli_lock);
1443                 }
1444                 rc = vmtruncate(inode, new_size);
1445                 if (!local_lock) {
1446                         spin_lock(&lli->lli_lock);
1447                         lli->lli_flags &= ~LLIF_SRVLOCK;
1448                         spin_unlock(&lli->lli_lock);
1449                 }
1450                 if (rc != 0) {
1451                         LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
1452                         ll_inode_size_unlock(inode, 0);
1453                 }
1454         }
1455
1456         if (local_lock) {
1457                 if (local_lock == 2)
1458                         err = obd_cancel(sbi->ll_dt_exp, lsm, LCK_PW, &lockh);
1459                 else
1460                         err = ll_extent_unlock(NULL, inode, lsm, LCK_PW, &lockh);
1461                 if (unlikely(err != 0)){
1462                         CERROR("extent unlock failed: err=%d,"
1463                                " unlock method =%d\n", err, local_lock);
1464                         if (rc == 0)
1465                                 rc = err;
1466                 }
1467         }
1468         RETURN(rc);
1469 }
1470
1471 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1472  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1473  * keep these values until such time as objects are allocated for it.
1474  * We do the MDS operations first, as it is checking permissions for us.
1475  * We don't do the MDS RPC if there is nothing that we want to store there;
1476  * otherwise there is no harm in updating mtime/atime on the MDS if we are
1477  * going to do an RPC anyway.
1478  *
1479  * If we are doing a truncate, we will send the mtime and ctime updates
1480  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1481  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1482  * at the same time.
1483  */
1484 int ll_setattr_raw(struct inode *inode, struct iattr *attr)
1485 {
1486         struct ll_inode_info *lli = ll_i2info(inode);
1487         struct lov_stripe_md *lsm = lli->lli_smd;
1488         struct ll_sb_info *sbi = ll_i2sbi(inode);
1489         struct md_op_data *op_data = NULL;
1490         struct md_open_data *mod = NULL;
1491         int ia_valid = attr->ia_valid;
1492         int rc = 0, rc1 = 0;
1493         ENTRY;
1494
1495         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu valid %x\n", inode->i_ino,
1496                attr->ia_valid);
1497         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETATTR, 1);
1498
1499         if (ia_valid & ATTR_SIZE) {
1500                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1501                         CDEBUG(D_INODE, "file too large %llu > "LPU64"\n",
1502                                attr->ia_size, ll_file_maxbytes(inode));
1503                         RETURN(-EFBIG);
1504                 }
1505
1506                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1507         }
1508
1509         /* POSIX: permission check for ATTR_*TIME_SET (from inode_change_ok) */
1510         if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
1511                 if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
1512                         RETURN(-EPERM);
1513         }
1514
1515         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1516         if (attr->ia_valid & ATTR_CTIME) {
1517                 attr->ia_ctime = CURRENT_TIME;
1518                 attr->ia_valid |= ATTR_CTIME_SET;
1519         }
1520         if (!(ia_valid & ATTR_ATIME_SET) && (attr->ia_valid & ATTR_ATIME)) {
1521                 attr->ia_atime = CURRENT_TIME;
1522                 attr->ia_valid |= ATTR_ATIME_SET;
1523         }
1524         if (!(ia_valid & ATTR_MTIME_SET) && (attr->ia_valid & ATTR_MTIME)) {
1525                 attr->ia_mtime = CURRENT_TIME;
1526                 attr->ia_valid |= ATTR_MTIME_SET;
1527         }
1528         if ((attr->ia_valid & ATTR_CTIME) && !(attr->ia_valid & ATTR_MTIME)) {
1529                 /* To avoid a stale mtime on the MDS, obtain it from the
1530                    OST and send it to the MDS. */
1531                 rc = ll_glimpse_size(inode, 0);
1532                 if (rc) 
1533                         RETURN(rc);
1534                 
1535                 attr->ia_valid |= ATTR_MTIME_SET | ATTR_MTIME;
1536                 attr->ia_mtime = inode->i_mtime;
1537         }
1538
1539         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1540                 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
1541                        LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1542                        cfs_time_current_sec());
1543
1544         /* NB: ATTR_SIZE will only be set after this point if the size
1545          * resides on the MDS, i.e., this file has no objects. */
1546         if (lsm)
1547                 attr->ia_valid &= ~ATTR_SIZE;
1548
1549         /* We always do an MDS RPC, even if we're only changing the size;
1550          * only the MDS knows whether truncate() should fail with -ETXTBUSY */
1551
1552         OBD_ALLOC_PTR(op_data);
1553         if (op_data == NULL)
1554                 RETURN(-ENOMEM);
1555
1556         memcpy(&op_data->op_attr, attr, sizeof(*attr));
1557
1558         /* Open epoch for truncate. */
1559         if ((ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) &&
1560             (ia_valid & ATTR_SIZE))
1561                 op_data->op_flags = MF_EPOCH_OPEN;
1562
1563         rc = ll_md_setattr(inode, op_data, &mod);
1564         if (rc)
1565                 GOTO(out, rc);
1566
1567         if (op_data->op_ioepoch)
1568                 CDEBUG(D_INODE, "Epoch "LPU64" opened on "DFID" for "
1569                        "truncate\n", op_data->op_ioepoch, PFID(&lli->lli_fid));
1570
1571         if (!lsm || !S_ISREG(inode->i_mode)) {
1572                 CDEBUG(D_INODE, "no lsm: not setting attrs on OST\n");
1573                 GOTO(out, rc = 0);
1574         }
1575
1576         /* We really need to get our PW lock before we change inode->i_size.
1577          * If we don't, we can race with other i_size updaters on our node,
1578          * like ll_file_read.  We can also race with i_size propagation to
1579          * other nodes through dirtying and writeback of final cached pages.
1580          * This last one is especially bad for racing O_APPEND users on
1581          * other nodes. */
1582         if (ia_valid & ATTR_SIZE) {
1583                 rc = ll_setattr_do_truncate(inode, attr->ia_size);
1584         } else if (ia_valid & (ATTR_MTIME | ATTR_MTIME_SET)) {
1585                 obd_flag flags;
1586                 struct obd_info oinfo = { { { 0 } } };
1587                 struct obdo *oa;
1588
1589                 CDEBUG(D_INODE, "set mtime on OST inode %lu to %lu\n",
1590                        inode->i_ino, LTIME_S(attr->ia_mtime));
1591
1592                 OBDO_ALLOC(oa);
1593                 if (oa) {
1594                         oa->o_id = lsm->lsm_object_id;
1595                         oa->o_gr = lsm->lsm_object_gr;
1596                         oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
1597
1598                         flags = OBD_MD_FLTYPE | OBD_MD_FLATIME |
1599                                 OBD_MD_FLMTIME | OBD_MD_FLCTIME |
1600                                 OBD_MD_FLFID | OBD_MD_FLGENER | 
1601                                 OBD_MD_FLGROUP;
1602
1603                         obdo_from_inode(oa, inode, flags);
1604
1605                         oinfo.oi_oa = oa;
1606                         oinfo.oi_md = lsm;
1607                         oinfo.oi_capa = ll_mdscapa_get(inode);
1608
1609                         /* XXX: this looks unnecessary now. */
1610                         rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1611                         capa_put(oinfo.oi_capa);
1612                         if (rc)
1613                                 CERROR("obd_setattr_rqset fails: rc=%d\n", rc);
1614                         OBDO_FREE(oa);
1615                 } else {
1616                         rc = -ENOMEM;
1617                 }
1618         }
1619         EXIT;
1620 out:
1621         if (op_data) {
1622                 if (op_data->op_ioepoch)
1623                         rc1 = ll_setattr_done_writing(inode, op_data, mod);
1624                 ll_finish_md_op_data(op_data);
1625         }
1626         return rc ? rc : rc1;
1627 }
1628
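/*
 * VFS ->setattr() entry point.  When ctime, size and mode are all being set
 * at once, the update is tagged with MDS_OPEN_OWNEROVERRIDE before being
 * handed to ll_setattr_raw().
 */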
1629 int ll_setattr(struct dentry *de, struct iattr *attr)
1630 {
1631         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1632             (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1633                 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1634
1635         return ll_setattr_raw(de->d_inode, attr);
1636 }
1637
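/*
 * Gather filesystem statistics: the MDS supplies the file/inode counts and
 * the OSTs (via obd_statfs_rqset on the data export) supply the block counts.
 * If the OSTs have fewer free objects than the MDS has free inodes, the total
 * file count is reduced so that the "inodes in use" number stays meaningful.
 */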
1638 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1639                        __u64 max_age, __u32 flags)
1640 {
1641         struct ll_sb_info *sbi = ll_s2sbi(sb);
1642         struct obd_statfs obd_osfs;
1643         int rc;
1644         ENTRY;
1645
1646         rc = obd_statfs(class_exp2obd(sbi->ll_md_exp), osfs, max_age, flags);
1647         if (rc) {
1648                 CERROR("md_statfs fails: rc = %d\n", rc);
1649                 RETURN(rc);
1650         }
1651
1652         osfs->os_type = sb->s_magic;
1653
1654         CDEBUG(D_SUPER, "MDC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1655                osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,osfs->os_files);
1656
1657         rc = obd_statfs_rqset(class_exp2obd(sbi->ll_dt_exp),
1658                               &obd_osfs, max_age, flags);
1659         if (rc) {
1660                 CERROR("obd_statfs fails: rc = %d\n", rc);
1661                 RETURN(rc);
1662         }
1663
1664         CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1665                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1666                obd_osfs.os_files);
1667
1668         osfs->os_bsize = obd_osfs.os_bsize;
1669         osfs->os_blocks = obd_osfs.os_blocks;
1670         osfs->os_bfree = obd_osfs.os_bfree;
1671         osfs->os_bavail = obd_osfs.os_bavail;
1672
1673         /* If we don't have as many objects free on the OST as inodes
1674          * on the MDS, we reduce the total number of inodes to
1675          * compensate, so that the "inodes in use" number is correct.
1676          */
1677         if (obd_osfs.os_ffree < osfs->os_ffree) {
1678                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1679                         obd_osfs.os_ffree;
1680                 osfs->os_ffree = obd_osfs.os_ffree;
1681         }
1682
1683         RETURN(rc);
1684 }
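/* ll_statfs() has two prototypes depending on whether the kernel passes a
 * dentry or a super_block to ->statfs; both variants delegate to
 * ll_statfs_internal() and unpack the result into the kstatfs, downshifting
 * block counts on 32-bit kernels to avoid overflow. */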
1685 #ifndef HAVE_STATFS_DENTRY_PARAM
1686 int ll_statfs(struct super_block *sb, struct kstatfs *sfs)
1687 {
1688 #else
1689 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1690 {
1691         struct super_block *sb = de->d_sb;
1692 #endif
1693         struct obd_statfs osfs;
1694         int rc;
1695
1696         CDEBUG(D_VFSTRACE, "VFS Op: at "LPU64" jiffies\n", get_jiffies_64());
1697         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1698
1699         /* For now we will always get up-to-date statfs values, but in the
1700          * future we may allow some amount of caching on the client (e.g.
1701          * from QOS or lprocfs updates). */
1702         rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - 1, 0);
1703         if (rc)
1704                 return rc;
1705
1706         statfs_unpack(sfs, &osfs);
1707
1708         /* We need to downshift for all 32-bit kernels, because we can't
1709          * tell if the kernel is being called via sys_statfs64() or not.
1710          * Stop before overflowing f_bsize; at that point it is better to
1711          * just risk EOVERFLOW if the caller is using the old sys_statfs(). */
1712         if (sizeof(long) < 8) {
1713                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1714                         sfs->f_bsize <<= 1;
1715
1716                         osfs.os_blocks >>= 1;
1717                         osfs.os_bfree >>= 1;
1718                         osfs.os_bavail >>= 1;
1719                 }
1720         }
1721
1722         sfs->f_blocks = osfs.os_blocks;
1723         sfs->f_bfree = osfs.os_bfree;
1724         sfs->f_bavail = osfs.os_bavail;
1725
1726         return 0;
1727 }
1728
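/*
 * Serialize i_size/KMS updates on this inode.  lli_size_sem is taken and its
 * owner recorded for the assertions below; if lock_lsm is set the stripe MD
 * lock is taken as well, which requires lli_smd to be non-NULL.  Typical use:
 *
 *      ll_inode_size_lock(inode, 1);
 *      ... update i_size / kms ...
 *      ll_inode_size_unlock(inode, 1);
 */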
1729 void ll_inode_size_lock(struct inode *inode, int lock_lsm)
1730 {
1731         struct ll_inode_info *lli;
1732         struct lov_stripe_md *lsm;
1733
1734         lli = ll_i2info(inode);
1735         LASSERT(lli->lli_size_sem_owner != current);
1736         down(&lli->lli_size_sem);
1737         LASSERT(lli->lli_size_sem_owner == NULL);
1738         lli->lli_size_sem_owner = current;
1739         lsm = lli->lli_smd;
1740         LASSERTF(lsm != NULL || lock_lsm == 0, "lsm %p, lock_lsm %d\n",
1741                  lsm, lock_lsm);
1742         if (lock_lsm)
1743                 lov_stripe_lock(lsm);
1744 }
1745
1746 void ll_inode_size_unlock(struct inode *inode, int unlock_lsm)
1747 {
1748         struct ll_inode_info *lli;
1749         struct lov_stripe_md *lsm;
1750
1751         lli = ll_i2info(inode);
1752         lsm = lli->lli_smd;
1753         LASSERTF(lsm != NULL || unlock_lsm == 0, "lsm %p, unlock_lsm %d\n",
1754                  lsm, unlock_lsm);
1755         if (unlock_lsm)
1756                 lov_stripe_unlock(lsm);
1757         LASSERT(lli->lli_size_sem_owner == current);
1758         lli->lli_size_sem_owner = NULL;
1759         up(&lli->lli_size_sem);
1760 }
1761
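/*
 * Replace the cached stripe MD with a joined lsm (LOV_MAGIC_JOIN, as asserted
 * below), freeing the old one and clamping lli_maxbytes to PAGE_CACHE_MAXBYTES.
 */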
1762 static void ll_replace_lsm(struct inode *inode, struct lov_stripe_md *lsm)
1763 {
1764         struct ll_inode_info *lli = ll_i2info(inode);
1765
1766         dump_lsm(D_INODE, lsm);
1767         dump_lsm(D_INODE, lli->lli_smd);
1768         LASSERTF(lsm->lsm_magic == LOV_MAGIC_JOIN,
1769                  "lsm must be joined lsm %p\n", lsm);
1770         obd_free_memmd(ll_i2dtexp(inode), &lli->lli_smd);
1771         CDEBUG(D_INODE, "replace lsm %p to lli_smd %p for inode %lu/%u(%p)\n",
1772                lsm, lli->lli_smd, inode->i_ino, inode->i_generation, inode);
1773         lli->lli_smd = lsm;
1774         lli->lli_maxbytes = lsm->lsm_maxbytes;
1775         if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
1776                 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
1777 }
1778
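/*
 * Apply the attributes carried in a lustre_md (an unpacked MDS reply) to the
 * VFS inode: merge or replace the stripe MD, update remote permissions or the
 * POSIX ACL, and refresh times, ownership, mode, flags and FID.  The size is
 * taken from the MDS only when OBD_MD_FLSIZE is set, guarded by an UPDATE
 * lock check in the Size-on-MDS case below.
 */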
1779 void ll_update_inode(struct inode *inode, struct lustre_md *md)
1780 {
1781         struct ll_inode_info *lli = ll_i2info(inode);
1782         struct mdt_body *body = md->body;
1783         struct lov_stripe_md *lsm = md->lsm;
1784         struct ll_sb_info *sbi = ll_i2sbi(inode);
1785
1786         LASSERT ((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
1787         if (lsm != NULL) {
1788                 if (lli->lli_smd == NULL) {
1789                         if (lsm->lsm_magic != LOV_MAGIC &&
1790                             lsm->lsm_magic != LOV_MAGIC_JOIN) {
1791                                 dump_lsm(D_ERROR, lsm);
1792                                 LBUG();
1793                         }
1794                         CDEBUG(D_INODE, "adding lsm %p to inode %lu/%u(%p)\n",
1795                                lsm, inode->i_ino, inode->i_generation, inode);
1796                         /* ll_inode_size_lock() requires that it only be
1797                          * called with lli_smd != NULL or lock_lsm == 0, or
1798                          * we can race between lock/unlock.  Bug 9547. */
1799                         lli->lli_smd = lsm;
1800                         lli->lli_maxbytes = lsm->lsm_maxbytes;
1801                         if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
1802                                 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
1803                 } else {
1804                         if (lli->lli_smd->lsm_magic == lsm->lsm_magic &&
1805                              lli->lli_smd->lsm_stripe_count ==
1806                                         lsm->lsm_stripe_count) {
1807                                 if (lov_stripe_md_cmp(lli->lli_smd, lsm)) {
1808                                         CERROR("lsm mismatch for inode %ld\n",
1809                                                 inode->i_ino);
1810                                         CERROR("lli_smd:\n");
1811                                         dump_lsm(D_ERROR, lli->lli_smd);
1812                                         CERROR("lsm:\n");
1813                                         dump_lsm(D_ERROR, lsm);
1814                                         LBUG();
1815                                 }
1816                         } else
1817                                 ll_replace_lsm(inode, lsm);
1818                 }
1819                 if (lli->lli_smd != lsm)
1820                         obd_free_memmd(ll_i2dtexp(inode), &lsm);
1821         }
1822
1823         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1824                 if (body->valid & OBD_MD_FLRMTPERM)
1825                         ll_update_remote_perm(inode, md->remote_perm);
1826         }
1827 #ifdef CONFIG_FS_POSIX_ACL
1828         else if (body->valid & OBD_MD_FLACL) {
1829                 spin_lock(&lli->lli_lock);
1830                 if (lli->lli_posix_acl)
1831                         posix_acl_release(lli->lli_posix_acl);
1832                 lli->lli_posix_acl = md->posix_acl;
1833                 spin_unlock(&lli->lli_lock);
1834         }
1835 #endif
1836         inode->i_ino = ll_fid_build_ino(sbi, &body->fid1);
1837
1838         if (body->valid & OBD_MD_FLATIME &&
1839             body->atime > LTIME_S(inode->i_atime))
1840                 LTIME_S(inode->i_atime) = body->atime;
1841         
1842         /* mtime is always updated with ctime, but can be set in the past.
1843            As write and utime(2) may happen within 1 second, and utime's
1844            mtime has priority over write's, take the mtime from the MDS
1845            when the ctimes are equal. */
1846         if (body->valid & OBD_MD_FLCTIME &&
1847             body->ctime >= LTIME_S(inode->i_ctime)) {
1848                 LTIME_S(inode->i_ctime) = body->ctime;
1849                 if (body->valid & OBD_MD_FLMTIME) {
1850                         CDEBUG(D_INODE, "setting ino %lu mtime "
1851                                "from %lu to "LPU64"\n", inode->i_ino, 
1852                                LTIME_S(inode->i_mtime), body->mtime);
1853                         LTIME_S(inode->i_mtime) = body->mtime;
1854                 }
1855         }
1856         if (body->valid & OBD_MD_FLMODE)
1857                 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
1858         if (body->valid & OBD_MD_FLTYPE)
1859                 inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
1860         if (S_ISREG(inode->i_mode)) {
1861                 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
1862         } else {
1863                 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1864         }
1865 #ifdef HAVE_INODE_BLKSIZE
1866         inode->i_blksize = 1<<inode->i_blkbits;
1867 #endif
1868         if (body->valid & OBD_MD_FLUID)
1869                 inode->i_uid = body->uid;
1870         if (body->valid & OBD_MD_FLGID)
1871                 inode->i_gid = body->gid;
1872         if (body->valid & OBD_MD_FLFLAGS)
1873                 inode->i_flags = ll_ext_to_inode_flags(body->flags);
1874         if (body->valid & OBD_MD_FLNLINK)
1875                 inode->i_nlink = body->nlink;
1876         if (body->valid & OBD_MD_FLRDEV)
1877                 inode->i_rdev = old_decode_dev(body->rdev);
1878
1879         if (body->valid & OBD_MD_FLID) {
1880                 /* FID shouldn't be changed! */
1881                 if (fid_is_sane(&lli->lli_fid)) {
1882                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
1883                                  "Trying to change FID "DFID
1884                                  " to the "DFID", inode %lu/%u(%p)\n",
1885                                  PFID(&lli->lli_fid), PFID(&body->fid1),
1886                                  inode->i_ino, inode->i_generation, inode);
1887                 } else 
1888                         lli->lli_fid = body->fid1;
1889         }
1890
1891         LASSERT(fid_seq(&lli->lli_fid) != 0);
1892
1893         if (body->valid & OBD_MD_FLSIZE) {
1894                 if ((ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) &&
1895                     S_ISREG(inode->i_mode) && lli->lli_smd) {
1896                         struct lustre_handle lockh;
1897                         ldlm_mode_t mode;
1898                         
1899                         /* As it is possible a blocking AST has been processed
1900                          * by this time, we need to check that there is an
1901                          * UPDATE lock on the client and set LLIF_MDS_SIZE_LOCK
1902                          * while holding it. */
1903                         mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1904                                                &lockh);
1905                         if (mode) {
1906                                 if (lli->lli_flags & (LLIF_DONE_WRITING |
1907                                                       LLIF_EPOCH_PENDING |
1908                                                       LLIF_SOM_DIRTY)) {
1909                                         CERROR("ino %lu flags %lu still has "
1910                                                "size authority! do not trust "
1911                                                "the size got from MDS\n",
1912                                                inode->i_ino, lli->lli_flags);
1913                                 } else {
1914                                         /* Use old size assignment to avoid
1915                                          * deadlock bz14138 & bz14326 */
1916                                         inode->i_size = body->size;
1917                                         lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1918                                 }
1919                                 ldlm_lock_decref(&lockh, mode);
1920                         }
1921                 } else {
1922                         /* Use old size assignment to avoid
1923                          * deadlock bz14138 & bz14326 */
1924                         inode->i_size = body->size;
1925                 }
1926
1927                 if (body->valid & OBD_MD_FLBLOCKS)
1928                         inode->i_blocks = body->blocks;
1929         }
1930
1931         if (body->valid & OBD_MD_FLMDSCAPA) {
1932                 LASSERT(md->mds_capa);
1933                 ll_add_capa(inode, md->mds_capa);
1934         }
1935         if (body->valid & OBD_MD_FLOSSCAPA) {
1936                 LASSERT(md->oss_capa);
1937                 ll_add_capa(inode, md->oss_capa);
1938         }
1939 }
1940
1941 static struct backing_dev_info ll_backing_dev_info = {
1942         .ra_pages       = 0,    /* No readahead */
1943 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
1944         .capabilities   = 0,    /* Does contribute to dirty memory */
1945 #else
1946         .memory_backed  = 0,    /* Does contribute to dirty memory */
1947 #endif
1948 };
1949
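/*
 * Fill a freshly allocated inode from the lustre_md passed as the iget
 * opaque argument, then wire up the inode, file and address-space operations
 * according to the file type.
 */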
1950 void ll_read_inode2(struct inode *inode, void *opaque)
1951 {
1952         struct lustre_md *md = opaque;
1953         struct ll_inode_info *lli = ll_i2info(inode);
1954         ENTRY;
1955
1956         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
1957                inode->i_ino, inode->i_generation, inode);
1958
1959         ll_lli_init(lli);
1960
1961         LASSERT(!lli->lli_smd);
1962
1963         /* Core attributes from the MDS first.  This is a new inode, and
1964          * the VFS doesn't zero times in the core inode so we have to do
1965          * it ourselves.  They will be overwritten by either MDS or OST
1966          * attributes - we just need to make sure they aren't newer. */
1967         LTIME_S(inode->i_mtime) = 0;
1968         LTIME_S(inode->i_atime) = 0;
1969         LTIME_S(inode->i_ctime) = 0;
1970         inode->i_rdev = 0;
1971         ll_update_inode(inode, md);
1972
1973         /* OIDEBUG(inode); */
1974
1975         if (S_ISREG(inode->i_mode)) {
1976                 struct ll_sb_info *sbi = ll_i2sbi(inode);
1977                 inode->i_op = &ll_file_inode_operations;
1978                 inode->i_fop = sbi->ll_fop;
1979                 inode->i_mapping->a_ops = &ll_aops;
1980                 EXIT;
1981         } else if (S_ISDIR(inode->i_mode)) {
1982                 inode->i_op = &ll_dir_inode_operations;
1983                 inode->i_fop = &ll_dir_operations;
1984                 inode->i_mapping->a_ops = &ll_dir_aops;
1985                 EXIT;
1986         } else if (S_ISLNK(inode->i_mode)) {
1987                 inode->i_op = &ll_fast_symlink_inode_operations;
1988                 EXIT;
1989         } else {
1990                 inode->i_op = &ll_special_inode_operations;
1991
1992                 init_special_inode(inode, inode->i_mode,
1993                                    kdev_t_to_nr(inode->i_rdev));
1994
1995                 /* initializing backing dev info. */
1996                 inode->i_mapping->backing_dev_info = &ll_backing_dev_info;
1997
1998                 EXIT;
1999         }
2000 }
2001
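/*
 * Final inode teardown: notify the MD layer that the FID is no longer in use
 * via obd_fid_delete(), then drop any cached pages and clear the inode.
 */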
2002 void ll_delete_inode(struct inode *inode)
2003 {
2004         struct ll_sb_info *sbi = ll_i2sbi(inode);
2005         int rc;
2006         ENTRY;
2007
2008         rc = obd_fid_delete(sbi->ll_md_exp, ll_inode2fid(inode));
2009         if (rc) {
2010                 CERROR("fid_delete() failed, rc %d\n", rc);
2011         }
2012         truncate_inode_pages(&inode->i_data, 0);
2013         clear_inode(inode);
2014
2015         EXIT;
2016 }
2017
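/*
 * ioctl handler for the ext3-compatible flag ioctls: EXT3_IOC_GETFLAGS reads
 * the flags from the MDS with a getattr RPC, while EXT3_IOC_SETFLAGS pushes
 * the new flags to the MDS and, if the file has objects, to the OSTs as well.
 * A user-space sketch (flag name from ext3_fs.h shown only as an example):
 *
 *      int flags;
 *      ioctl(fd, EXT3_IOC_GETFLAGS, &flags);
 *      flags |= EXT3_APPEND_FL;
 *      ioctl(fd, EXT3_IOC_SETFLAGS, &flags);
 */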
2018 int ll_iocontrol(struct inode *inode, struct file *file,
2019                  unsigned int cmd, unsigned long arg)
2020 {
2021         struct ll_sb_info *sbi = ll_i2sbi(inode);
2022         struct ptlrpc_request *req = NULL;
2023         int rc, flags = 0;
2024         ENTRY;
2025
2026         switch(cmd) {
2027         case EXT3_IOC_GETFLAGS: {
2028                 struct mdt_body *body;
2029                 struct obd_capa *oc;
2030
2031                 oc = ll_mdscapa_get(inode);
2032                 rc = md_getattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
2033                                 OBD_MD_FLFLAGS, 0, &req);
2034                 capa_put(oc);
2035                 if (rc) {
2036                         CERROR("failure %d inode %lu\n", rc, inode->i_ino);
2037                         RETURN(-abs(rc));
2038                 }
2039
2040                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2041
2042                 flags = body->flags;
2043
2044                 ptlrpc_req_finished(req);
2045
2046                 RETURN(put_user(flags, (int *)arg));
2047         }
2048         case EXT3_IOC_SETFLAGS: {
2049                 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
2050                 struct obd_info oinfo = { { { 0 } } };
2051                 struct md_op_data *op_data;
2052
2053                 if (get_user(flags, (int *)arg))
2054                         RETURN(-EFAULT);
2055
2056                 oinfo.oi_md = lsm;
2057                 OBDO_ALLOC(oinfo.oi_oa);
2058                 if (!oinfo.oi_oa)
2059                         RETURN(-ENOMEM);
2060
2061                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2062                                              LUSTRE_OPC_ANY, NULL);
2063                 if (IS_ERR(op_data))
2064                         RETURN(PTR_ERR(op_data));
2065
2066                 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
2067                 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
2068                 rc = md_setattr(sbi->ll_md_exp, op_data,
2069                                 NULL, 0, NULL, 0, &req, NULL);
2070                 ll_finish_md_op_data(op_data);
2071                 ptlrpc_req_finished(req);
2072                 if (rc || lsm == NULL) {
2073                         OBDO_FREE(oinfo.oi_oa);
2074                         RETURN(rc);
2075                 }
2076
2077                 oinfo.oi_oa->o_id = lsm->lsm_object_id;
2078                 oinfo.oi_oa->o_gr = lsm->lsm_object_gr;
2079                 oinfo.oi_oa->o_flags = flags;
2080                 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS | 
2081                                        OBD_MD_FLGROUP;
2082                 oinfo.oi_capa = ll_mdscapa_get(inode);
2083
2084                 obdo_from_inode(oinfo.oi_oa, inode,
2085                                 OBD_MD_FLFID | OBD_MD_FLGENER);
2086                 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
2087                 capa_put(oinfo.oi_capa);
2088                 OBDO_FREE(oinfo.oi_oa);
2089                 if (rc) {
2090                         if (rc != -EPERM && rc != -EACCES)
2091                                 CERROR("obd_setattr_rqset fails: rc = %d\n", rc);
2092                         RETURN(rc);
2093                 }
2094
2095                 inode->i_flags = ll_ext_to_inode_flags(flags |
2096                                                        MDS_BFLAG_EXT_FLAGS);
2097                 RETURN(0);
2098         }
2099         default:
2100                 RETURN(-ENOSYS);
2101         }
2102
2103         RETURN(0);
2104 }
2105
2106 int ll_flush_ctx(struct inode *inode)
2107 {
2108         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2109
2110         CDEBUG(D_SEC, "flush context for user %d\n", current->uid);
2111
2112         obd_set_info_async(sbi->ll_md_exp,
2113                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2114                            0, NULL, NULL);
2115         obd_set_info_async(sbi->ll_dt_exp,
2116                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2117                            0, NULL, NULL);
2118         return 0;
2119 }
2120
2121 /* umount -f client means force down, don't save state */
2122 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
2123 void ll_umount_begin(struct vfsmount *vfsmnt, int flags)
2124 {
2125         struct super_block *sb = vfsmnt->mnt_sb;
2126 #else
2127 void ll_umount_begin(struct super_block *sb)
2128 {
2129 #endif
2130         struct lustre_sb_info *lsi = s2lsi(sb);
2131         struct ll_sb_info *sbi = ll_s2sbi(sb);
2132         struct obd_device *obd;
2133         struct obd_ioctl_data ioc_data = { 0 };
2134         ENTRY;
2135
2136 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
2137         if (!(flags & MNT_FORCE)) {
2138                 EXIT;
2139                 return;
2140         }
2141 #endif
2142
2143         /* Tell the MGC we got umount -f */
2144         lsi->lsi_flags |= LSI_UMOUNT_FORCE;
2145
2146         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2147                sb->s_count, atomic_read(&sb->s_active));
2148
2149         obd = class_exp2obd(sbi->ll_md_exp);
2150         if (obd == NULL) {
2151                 CERROR("Invalid MDC connection handle "LPX64"\n",
2152                        sbi->ll_md_exp->exp_handle.h_cookie);
2153                 EXIT;
2154                 return;
2155         }
2156         obd->obd_force = 1;
2157         obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp, sizeof ioc_data,
2158                       &ioc_data, NULL);
2159
2160         obd = class_exp2obd(sbi->ll_dt_exp);
2161         if (obd == NULL) {
2162                 CERROR("Invalid LOV connection handle "LPX64"\n",
2163                        sbi->ll_dt_exp->exp_handle.h_cookie);
2164                 EXIT;
2165                 return;
2166         }
2167
2168         obd->obd_force = 1;
2169         obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp, sizeof ioc_data,
2170                       &ioc_data, NULL);
2171
2172         /* Really, we'd like to wait until there are no requests outstanding,
2173          * and then continue.  For now, we just invalidate the requests,
2174          * schedule, and hope.
2175          */
2176         schedule();
2177
2178         EXIT;
2179 }
2180
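/*
 * Remount handler: only the read-only flag is supported; the new state is
 * pushed to the MDS with KEY_READ_ONLY before s_flags is updated locally.
 */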
2181 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2182 {
2183         struct ll_sb_info *sbi = ll_s2sbi(sb);
2184         int err;
2185         __u32 read_only;
2186
2187         if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2188                 read_only = *flags & MS_RDONLY;
2189                 err = obd_set_info_async(sbi->ll_md_exp,
2190                                          sizeof(KEY_READ_ONLY),
2191                                          KEY_READ_ONLY, sizeof(read_only),
2192                                          &read_only, NULL);
2193                 if (err) {
2194                         CERROR("Failed to change the read-only flag during "
2195                                "remount: %d\n", err);
2196                         return err;
2197                 }
2198
2199                 if (read_only)
2200                         sb->s_flags |= MS_RDONLY;
2201                 else
2202                         sb->s_flags &= ~MS_RDONLY;
2203         }
2204         return 0;
2205 }
2206
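/*
 * Build or update an inode from an MDS reply.  The reply is unpacked into a
 * lustre_md; if *inode is already set it is refreshed in place, otherwise a
 * new inode is obtained with ll_iget() using the FID-derived inode number.
 */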
2207 int ll_prep_inode(struct inode **inode,
2208                   struct ptlrpc_request *req,
2209                   struct super_block *sb)
2210 {
2211         struct ll_sb_info *sbi = NULL;
2212         struct lustre_md md;
2213         int rc = 0;
2214         ENTRY;
2215
2216         LASSERT(*inode || sb);
2217         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2218         prune_deathrow(sbi, 1);
2219         memset(&md, 0, sizeof(struct lustre_md));
2220
2221         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2222                               sbi->ll_md_exp, &md);
2223         if (rc)
2224                 RETURN(rc);
2225
2226         if (*inode) {
2227                 ll_update_inode(*inode, &md);
2228         } else {
2229                 LASSERT(sb != NULL);
2230
2231                 /*
2232                  * At this point the server returns the same FID that the
2233                  * client generated at create time, so using ->fid1 is okay here.
2234                  */
2235                 LASSERT(fid_is_sane(&md.body->fid1));
2236
2237                 *inode = ll_iget(sb, ll_fid_build_ino(sbi, &md.body->fid1), &md);
2238                 if (*inode == NULL || is_bad_inode(*inode)) {
2239                         if (md.lsm)
2240                                 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2241 #ifdef CONFIG_FS_POSIX_ACL
2242                         if (md.posix_acl) {
2243                                 posix_acl_release(md.posix_acl);
2244                                 md.posix_acl = NULL;
2245                         }
2246 #endif
2247                         rc = -ENOMEM;
2248                         CERROR("new_inode -fatal: rc %d\n", rc);
2249                         GOTO(out, rc);
2250                 }
2251         }
2252
2253         rc = obd_checkmd(sbi->ll_dt_exp, sbi->ll_md_exp,
2254                          ll_i2info(*inode)->lli_smd);
2255 out:
2256         md_free_lustre_md(sbi->ll_md_exp, &md);
2257         RETURN(rc);
2258 }
2259
2260 char *llap_origins[] = {
2261         [LLAP_ORIGIN_UNKNOWN] = "--",
2262         [LLAP_ORIGIN_READPAGE] = "rp",
2263         [LLAP_ORIGIN_READAHEAD] = "ra",
2264         [LLAP_ORIGIN_COMMIT_WRITE] = "cw",
2265         [LLAP_ORIGIN_WRITEPAGE] = "wp",
2266         [LLAP_ORIGIN_LOCKLESS_IO] = "ls"
2267 };
2268
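/* Walk the async-page list starting at @list, skipping llaps whose page has
 * already gone, and return the next live llap, or NULL when @head is reached. */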
2269 struct ll_async_page *llite_pglist_next_llap(struct list_head *head,
2270                                              struct list_head *list)
2271 {
2272         struct ll_async_page *llap;
2273         struct list_head *pos;
2274
2275         list_for_each(pos, list) {
2276                 if (pos == head)
2277                         return NULL;
2278                 llap = list_entry(pos, struct ll_async_page, llap_pglist_item);
2279                 if (llap->llap_page == NULL)
2280                         continue;
2281                 return llap;
2282         }
2283         LBUG();
2284         return NULL;
2285 }
2286
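/*
 * IOC_OBD_STATFS pass-through: the ioctl payload selects either the MDC or
 * the LOV export and the request is forwarded with obd_iocontrol().
 */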
2287 int ll_obd_statfs(struct inode *inode, void *arg)
2288 {
2289         struct ll_sb_info *sbi = NULL;
2290         struct obd_export *exp;
2291         char *buf = NULL;
2292         struct obd_ioctl_data *data = NULL;
2293         __u32 type;
2294         int len = 0, rc;
2295
2296         if (!inode || !(sbi = ll_i2sbi(inode)))
2297                 GOTO(out_statfs, rc = -EINVAL);
2298
2299         rc = obd_ioctl_getdata(&buf, &len, arg);
2300         if (rc)
2301                 GOTO(out_statfs, rc);
2302
2303         data = (void*)buf;
2304         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2305             !data->ioc_pbuf1 || !data->ioc_pbuf2)
2306                 GOTO(out_statfs, rc = -EINVAL);
2307
2308         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2309         if (type == LL_STATFS_MDC)
2310                 exp = sbi->ll_md_exp;
2311         else if (type == LL_STATFS_LOV)
2312                 exp = sbi->ll_dt_exp;
2313         else 
2314                 GOTO(out_statfs, rc = -ENODEV);
2315
2316         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2317         if (rc)
2318                 GOTO(out_statfs, rc);
2319 out_statfs:
2320         if (buf)
2321                 obd_ioctl_freedata(buf, len);
2322         return rc;
2323 }
2324
2325 int ll_process_config(struct lustre_cfg *lcfg)
2326 {
2327         char *ptr;
2328         void *sb;
2329         struct lprocfs_static_vars lvars;
2330         unsigned long x; 
2331         int rc = 0;
2332
2333         lprocfs_llite_init_vars(&lvars);
2334
2335         /* The instance name contains the sb: lustre-client-aacfe000 */
2336         ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2337         if (!ptr || !*(++ptr)) 
2338                 return -EINVAL;
2339         if (sscanf(ptr, "%lx", &x) != 1)
2340                 return -EINVAL;
2341         sb = (void *)x;
2342         /* This better be a real Lustre superblock! */
2343         LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2344
2345         /* Note we have not called client_common_fill_super yet, so
2346            the proc functions must be able to handle that! */
2347         rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2348                                       lcfg, sb);
2349         return(rc);
2350 }
2351
2352 /* This function prepares the md_op_data hint for passing down to the MD stack. */
2353 struct md_op_data * ll_prep_md_op_data(struct md_op_data *op_data,
2354                                        struct inode *i1, struct inode *i2,
2355                                        const char *name, int namelen,
2356                                        int mode, __u32 opc, void *data)
2357 {
2358         LASSERT(i1 != NULL);
2359
2360         if (namelen > ll_i2sbi(i1)->ll_namelen)
2361                 return ERR_PTR(-ENAMETOOLONG);
2362         
2363         if (op_data == NULL)
2364                 OBD_ALLOC_PTR(op_data);
2365         
2366         if (op_data == NULL)
2367                 return ERR_PTR(-ENOMEM);
2368
2369         ll_i2gids(op_data->op_suppgids, i1, i2);
2370         op_data->op_fid1 = *ll_inode2fid(i1);
2371         op_data->op_capa1 = ll_mdscapa_get(i1);
2372
2373         if (i2) {
2374                 op_data->op_fid2 = *ll_inode2fid(i2);
2375                 op_data->op_capa2 = ll_mdscapa_get(i2);
2376         } else {
2377                 fid_zero(&op_data->op_fid2);
2378                 op_data->op_capa2 = NULL;
2379         }
2380
2381         op_data->op_name = name;
2382         op_data->op_namelen = namelen;
2383         op_data->op_mode = mode;
2384         op_data->op_mod_time = cfs_time_current_sec();
2385         op_data->op_fsuid = current->fsuid;
2386         op_data->op_fsgid = current->fsgid;
2387         op_data->op_cap = current->cap_effective;
2388         op_data->op_bias = MDS_CHECK_SPLIT;
2389         op_data->op_opc = opc;
2390         op_data->op_mds = 0;
2391         op_data->op_data = data;
2392
2393         return op_data;
2394 }
2395
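/*
 * ll_finish_md_op_data() releases the capabilities taken by
 * ll_prep_md_op_data() and frees the structure.  A minimal caller-side
 * sketch, with a placeholder for whatever MD operation actually consumes
 * the op_data:
 *
 *      op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *                                   LUSTRE_OPC_ANY, NULL);
 *      if (IS_ERR(op_data))
 *              RETURN(PTR_ERR(op_data));
 *      rc = <md operation using op_data>;
 *      ll_finish_md_op_data(op_data);
 */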
2396 void ll_finish_md_op_data(struct md_op_data *op_data)
2397 {
2398         capa_put(op_data->op_capa1);
2399         capa_put(op_data->op_capa2);
2400         OBD_FREE_PTR(op_data);
2401 }