1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/llite/llite_lib.c
37  *
38  * Lustre Light Super operations
39  */
40
41 #define DEBUG_SUBSYSTEM S_LLITE
42
43 #include <linux/module.h>
44 #include <linux/types.h>
45 #include <linux/random.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48
49 #include <lustre_lite.h>
50 #include <lustre_ha.h>
51 #include <lustre_dlm.h>
52 #include <lprocfs_status.h>
53 #include <lustre_disk.h>
54 #include <lustre_param.h>
55 #include <lustre_log.h>
56 #include <cl_object.h>
57 #include <obd_cksum.h>
58 #include "llite_internal.h"
59
60 cfs_mem_cache_t *ll_file_data_slab;
61
62 CFS_LIST_HEAD(ll_super_blocks);
63 cfs_spinlock_t ll_sb_lock = CFS_SPIN_LOCK_UNLOCKED;
64
65 extern struct address_space_operations ll_aops;
66 extern struct address_space_operations ll_dir_aops;
67
68 #ifndef log2
69 #define log2(n) cfs_ffz(~(n))
70 #endif
71
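/* Allocate and initialize the per-mount ll_sb_info: locks, async page and
 * readahead limits sized from total low memory, a freshly generated UUID,
 * and membership in the global ll_super_blocks list. */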
72 static struct ll_sb_info *ll_init_sbi(void)
73 {
74         struct ll_sb_info *sbi = NULL;
75         unsigned long pages;
76         struct sysinfo si;
77         class_uuid_t uuid;
78         int i;
79         ENTRY;
80
81         OBD_ALLOC(sbi, sizeof(*sbi));
82         if (!sbi)
83                 RETURN(NULL);
84
85         cfs_spin_lock_init(&sbi->ll_lock);
86         cfs_init_mutex(&sbi->ll_lco.lco_lock);
87         cfs_spin_lock_init(&sbi->ll_pp_extent_lock);
88         cfs_spin_lock_init(&sbi->ll_process_lock);
89         sbi->ll_rw_stats_on = 0;
90
91         si_meminfo(&si);
92         pages = si.totalram - si.totalhigh;
93         if (pages >> (20 - CFS_PAGE_SHIFT) < 512) {
94 #ifdef HAVE_BGL_SUPPORT
95                 sbi->ll_async_page_max = pages / 4;
96 #else
97                 sbi->ll_async_page_max = pages / 2;
98 #endif
99         } else {
100                 sbi->ll_async_page_max = (pages / 4) * 3;
101         }
102
103         sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
104                                            SBI_DEFAULT_READAHEAD_MAX);
105         sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
106         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
107                                            SBI_DEFAULT_READAHEAD_WHOLE_MAX;
108         CFS_INIT_LIST_HEAD(&sbi->ll_conn_chain);
109         CFS_INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
110
111         ll_generate_random_uuid(uuid);
112         class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
113         CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
114
115         cfs_spin_lock(&ll_sb_lock);
116         cfs_list_add_tail(&sbi->ll_list, &ll_super_blocks);
117         cfs_spin_unlock(&ll_sb_lock);
118
119 #ifdef ENABLE_LLITE_CHECKSUM
120         sbi->ll_flags |= LL_SBI_CHECKSUM;
121 #endif
122
123 #ifdef HAVE_LRU_RESIZE_SUPPORT
124         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
125 #endif
126
127 #ifdef HAVE_EXPORT___IGET
128         CFS_INIT_LIST_HEAD(&sbi->ll_deathrow);
129         cfs_spin_lock_init(&sbi->ll_deathrow_lock);
130 #endif
131         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
132                 cfs_spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. \
133                                    pp_r_hist.oh_lock);
134                 cfs_spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. \
135                                    pp_w_hist.oh_lock);
136         }
137
138         /* metadata statahead is enabled by default */
139         sbi->ll_sa_max = LL_SA_RPC_DEF;
140
141         RETURN(sbi);
142 }
143
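/* Remove the sbi from the global ll_super_blocks list and free it. */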
144 void ll_free_sbi(struct super_block *sb)
145 {
146         struct ll_sb_info *sbi = ll_s2sbi(sb);
147         ENTRY;
148
149         if (sbi != NULL) {
150                 cfs_spin_lock(&ll_sb_lock);
151                 cfs_list_del(&sbi->ll_list);
152                 cfs_spin_unlock(&ll_sb_lock);
153                 OBD_FREE(sbi, sizeof(*sbi));
154         }
155         EXIT;
156 }
157
158 static struct dentry_operations ll_d_root_ops = {
159         .d_compare = ll_dcompare,
160         .d_revalidate = ll_revalidate_nd,
161 };
162
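/* Connect this client to the named metadata (md) and data (dt) obd devices,
 * negotiate connect flags with each, fetch the root inode from the MDS and
 * finish setting up the VFS superblock (s_op, s_root, block size, s_dev). */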
163 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
164 {
165         struct inode *root = 0;
166         struct ll_sb_info *sbi = ll_s2sbi(sb);
167         struct obd_device *obd;
168         struct obd_capa *oc = NULL;
169         struct obd_statfs osfs;
170         struct ptlrpc_request *request = NULL;
171         struct obd_connect_data *data = NULL;
172         struct obd_uuid *uuid;
173         struct lustre_md lmd;
174         obd_valid valid;
175         int size, err, checksum;
176         ENTRY;
177
178         obd = class_name2obd(md);
179         if (!obd) {
180                 CERROR("MD %s: not setup or attached\n", md);
181                 RETURN(-EINVAL);
182         }
183
184         OBD_ALLOC_PTR(data);
185         if (data == NULL)
186                 RETURN(-ENOMEM);
187
188         if (proc_lustre_fs_root) {
189                 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
190                                                   dt, md);
191                 if (err < 0)
192                         CERROR("could not register mount in /proc/fs/lustre\n");
193         }
194
195         /* indicate the features supported by this client */
196         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
197                                   OBD_CONNECT_JOIN     | OBD_CONNECT_ATTRFID  |
198                                   OBD_CONNECT_VERSION  | OBD_CONNECT_MDS_CAPA |
199                                   OBD_CONNECT_OSS_CAPA | OBD_CONNECT_CANCELSET|
200                                   OBD_CONNECT_FID      | OBD_CONNECT_AT |
201                                   OBD_CONNECT_LOV_V3 | OBD_CONNECT_RMT_CLIENT |
202                                   OBD_CONNECT_VBR      | OBD_CONNECT_SOM;
203
204 #ifdef HAVE_LRU_RESIZE_SUPPORT
205         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
206                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
207 #endif
208 #ifdef CONFIG_FS_POSIX_ACL
209         data->ocd_connect_flags |= OBD_CONNECT_ACL;
210 #endif
211         data->ocd_ibits_known = MDS_INODELOCK_FULL;
212         data->ocd_version = LUSTRE_VERSION_CODE;
213
214         if (sb->s_flags & MS_RDONLY)
215                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
216         if (sbi->ll_flags & LL_SBI_USER_XATTR)
217                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
218
219 #ifdef HAVE_MS_FLOCK_LOCK
220         /* force vfs to use lustre handler for flock() calls - bug 10743 */
221         sb->s_flags |= MS_FLOCK_LOCK;
222 #endif
223
224         if (sbi->ll_flags & LL_SBI_FLOCK)
225                 sbi->ll_fop = &ll_file_operations_flock;
226         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
227                 sbi->ll_fop = &ll_file_operations;
228         else
229                 sbi->ll_fop = &ll_file_operations_noflock;
230
231         /* real client */
232         data->ocd_connect_flags |= OBD_CONNECT_REAL;
233         if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
234                 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
235
236         err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, data, NULL);
237         if (err == -EBUSY) {
238                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
239                                    "recovery, of which this client is not a "
240                                    "part. Please wait for recovery to complete,"
241                                    " abort, or time out.\n", md);
242                 GOTO(out, err);
243         } else if (err) {
244                 CERROR("cannot connect to %s: rc = %d\n", md, err);
245                 GOTO(out, err);
246         }
247
248         err = obd_fid_init(sbi->ll_md_exp);
249         if (err) {
250                 CERROR("Can't init metadata layer FID infrastructure, "
251                        "rc %d\n", err);
252                 GOTO(out_md, err);
253         }
254
255         err = obd_statfs(obd, &osfs, cfs_time_current_64() - CFS_HZ, 0);
256         if (err)
257                 GOTO(out_md_fid, err);
258
259         size = sizeof(*data);
260         err = obd_get_info(sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
261                            KEY_CONN_DATA,  &size, data, NULL);
262         if (err) {
263                 CERROR("Get connect data failed: %d\n", err);
264                 GOTO(out_md_fid, err);
265         }
266
267         LASSERT(osfs.os_bsize);
268         sb->s_blocksize = osfs.os_bsize;
269         sb->s_blocksize_bits = log2(osfs.os_bsize);
270         sb->s_magic = LL_SUPER_MAGIC;
271
272         /* for bug 11559. in $LINUX/fs/read_write.c, function do_sendfile():
273          *         retval = in_file->f_op->sendfile(...);
274          *         if (*ppos > max)
275          *                 retval = -EOVERFLOW;
276          *
277          * it will check if *ppos is greater than max. However, max equals to
278          * s_maxbytes, which is a negative integer in a x86_64 box since loff_t
279          * has been defined as a signed long long integer in linux kernel. */
280 #if BITS_PER_LONG == 64
281         sb->s_maxbytes = PAGE_CACHE_MAXBYTES >> 1;
282 #else
283         sb->s_maxbytes = PAGE_CACHE_MAXBYTES;
284 #endif
285         sbi->ll_namelen = osfs.os_namelen;
286         sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
287
288         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
289             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
290                 LCONSOLE_INFO("Disabling user_xattr feature because "
291                               "it is not supported on the server\n");
292                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
293         }
294
295         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
296 #ifdef MS_POSIXACL
297                 sb->s_flags |= MS_POSIXACL;
298 #endif
299                 sbi->ll_flags |= LL_SBI_ACL;
300         } else {
301                 LCONSOLE_INFO("client wants to enable acl, but mdt does not!\n");
302 #ifdef MS_POSIXACL
303                 sb->s_flags &= ~MS_POSIXACL;
304 #endif
305                 sbi->ll_flags &= ~LL_SBI_ACL;
306         }
307
308         if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
309                 if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
310                         sbi->ll_flags |= LL_SBI_RMT_CLIENT;
311                         LCONSOLE_INFO("client is set as remote by default.\n");
312                 }
313         } else {
314                 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
315                         sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
316                         LCONSOLE_INFO("client claims to be remote, but the "
317                                       "server rejected it; falling back to local.\n");
318                 }
319         }
320
321         if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
322                 LCONSOLE_INFO("client enabled MDS capability!\n");
323                 sbi->ll_flags |= LL_SBI_MDS_CAPA;
324         }
325
326         if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
327                 LCONSOLE_INFO("client enabled OSS capability!\n");
328                 sbi->ll_flags |= LL_SBI_OSS_CAPA;
329         }
330
331         obd = class_name2obd(dt);
332         if (!obd) {
333                 CERROR("DT %s: not setup or attached\n", dt);
334                 GOTO(out_md_fid, err = -ENODEV);
335         }
336
337         data->ocd_connect_flags = OBD_CONNECT_GRANT     | OBD_CONNECT_VERSION  |
338                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
339                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID      |
340                                   OBD_CONNECT_SRVLOCK   | OBD_CONNECT_TRUNCLOCK|
341                                   OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
342                                   OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
343                                   OBD_CONNECT_SOM;
344
345         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
346                 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
347                  * disabled by default, because it can still be enabled on the
348                  * fly via /proc. As a consequence, we still need to come to an
349                  * agreement on the supported algorithms at connect time */
350                 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
351
352                 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
353                         data->ocd_cksum_types = OBD_CKSUM_ADLER;
354                 else
355                         /* send the list of supported checksum types */
356                         data->ocd_cksum_types = OBD_CKSUM_ALL;
357         }
358
359 #ifdef HAVE_LRU_RESIZE_SUPPORT
360         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
361 #endif
362         if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
363                 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
364
365         CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d "
366                "ocd_grant: %d\n", data->ocd_connect_flags,
367                data->ocd_version, data->ocd_grant);
368
369         obd->obd_upcall.onu_owner = &sbi->ll_lco;
370         obd->obd_upcall.onu_upcall = cl_ocd_update;
371         data->ocd_brw_size = PTLRPC_MAX_BRW_PAGES << CFS_PAGE_SHIFT;
372
373         err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data, NULL);
374         if (err == -EBUSY) {
375                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
376                                    "recovery, of which this client is not a "
377                                    "part.  Please wait for recovery to "
378                                    "complete, abort, or time out.\n", dt);
379                 GOTO(out_md_fid, err);
380         } else if (err) {
381                 CERROR("Cannot connect to %s: rc = %d\n", dt, err);
382                 GOTO(out_md_fid, err);
383         }
384
385         err = obd_fid_init(sbi->ll_dt_exp);
386         if (err) {
387                 CERROR("Can't init data layer FID infrastructure, "
388                        "rc %d\n", err);
389                 GOTO(out_dt, err);
390         }
391
392         cfs_mutex_down(&sbi->ll_lco.lco_lock);
393         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
394         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
395         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
396         cfs_mutex_up(&sbi->ll_lco.lco_lock);
397
398         fid_zero(&sbi->ll_root_fid);
399         err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
400         if (err) {
401                 CERROR("cannot mds_connect: rc = %d\n", err);
402                 GOTO(out_lock_cn_cb, err);
403         }
404         if (!fid_is_sane(&sbi->ll_root_fid)) {
405                 CERROR("Invalid root fid during mount\n");
406                 GOTO(out_lock_cn_cb, err = -EINVAL);
407         }
408         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
409
410         sb->s_op = &lustre_super_operations;
411         sb->s_export_op = &lustre_export_operations;
412
413         /* make root inode
414          * XXX: move this to after cbd setup? */
415         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
416         if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
417                 valid |= OBD_MD_FLRMTPERM;
418         else if (sbi->ll_flags & LL_SBI_ACL)
419                 valid |= OBD_MD_FLACL;
420
421         err = md_getattr(sbi->ll_md_exp, &sbi->ll_root_fid, oc, valid, 0,
422                          &request);
423         if (oc)
424                 capa_put(oc);
425         if (err) {
426                 CERROR("md_getattr failed for root: rc = %d\n", err);
427                 GOTO(out_lock_cn_cb, err);
428         }
429         memset(&lmd, 0, sizeof(lmd));
430         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
431                                sbi->ll_md_exp, &lmd);
432         if (err) {
433                 CERROR("failed to understand root inode md: rc = %d\n", err);
434                 ptlrpc_req_finished (request);
435                 GOTO(out_lock_cn_cb, err);
436         }
437
438         LASSERT(fid_is_sane(&sbi->ll_root_fid));
439         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid), &lmd);
440         md_free_lustre_md(sbi->ll_md_exp, &lmd);
441         ptlrpc_req_finished(request);
442
443         if (root == NULL || IS_ERR(root)) {
444                 if (lmd.lsm)
445                         obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
446 #ifdef CONFIG_FS_POSIX_ACL
447                 if (lmd.posix_acl) {
448                         posix_acl_release(lmd.posix_acl);
449                         lmd.posix_acl = NULL;
450                 }
451 #endif
452                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
453                 root = NULL;
454                 CERROR("lustre_lite: bad iget4 for root\n");
455                 GOTO(out_root, err);
456         }
457
458         err = ll_close_thread_start(&sbi->ll_lcq);
459         if (err) {
460                 CERROR("cannot start close thread: rc %d\n", err);
461                 GOTO(out_root, err);
462         }
463
464 #ifdef CONFIG_FS_POSIX_ACL
465         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
466                 rct_init(&sbi->ll_rct);
467                 et_init(&sbi->ll_et);
468         }
469 #endif
470
471         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
472         err = obd_set_info_async(sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
473                                  KEY_CHECKSUM, sizeof(checksum), &checksum,
474                                  NULL);
475         cl_sb_init(sb);
476
477         sb->s_root = d_alloc_root(root);
478         if (data != NULL)
479                 OBD_FREE(data, sizeof(*data));
480
481         sb->s_root->d_op = &ll_d_root_ops;
482
483         sbi->ll_sdev_orig = sb->s_dev;
484
485         /* We set sb->s_dev equal on all lustre clients in order to support
486          * NFS export clustering.  NFSD requires that the FSID be the same
487          * on all clients. */
488         /* s_dev is also used in lt_compare() to compare two fs, but that is
489          * only a node-local comparison. */
490         uuid = obd_get_uuid(sbi->ll_md_exp);
491         if (uuid != NULL)
492                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
493
494         RETURN(err);
495 out_root:
496         if (root)
497                 iput(root);
498 out_lock_cn_cb:
499         obd_fid_fini(sbi->ll_dt_exp);
500 out_dt:
501         obd_disconnect(sbi->ll_dt_exp);
502         sbi->ll_dt_exp = NULL;
503 out_md_fid:
504         obd_fid_fini(sbi->ll_md_exp);
505 out_md:
506         obd_disconnect(sbi->ll_md_exp);
507         sbi->ll_md_exp = NULL;
508 out:
509         if (data != NULL)
510                 OBD_FREE_PTR(data);
511         lprocfs_unregister_mountpoint(sbi);
512         return err;
513 }
514
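/* Ask the MDS for the maximum EA (striping metadata) size so callers can
 * size their reply buffers accordingly. */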
515 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
516 {
517         int size, rc;
518
519         *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
520         size = sizeof(int);
521         rc = obd_get_info(sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
522                           KEY_MAX_EASIZE, &size, lmmsize, NULL);
523         if (rc)
524                 CERROR("Get max mdsize error rc %d \n", rc);
525
526         RETURN(rc);
527 }
528
529 void ll_dump_inode(struct inode *inode)
530 {
531         struct list_head *tmp;
532         int dentry_count = 0;
533
534         LASSERT(inode != NULL);
535
536         list_for_each(tmp, &inode->i_dentry)
537                 dentry_count++;
538
539         CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
540                inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
541                inode->i_mode, atomic_read(&inode->i_count), dentry_count);
542 }
543
544 void lustre_dump_dentry(struct dentry *dentry, int recur)
545 {
546         struct list_head *tmp;
547         int subdirs = 0;
548
549         LASSERT(dentry != NULL);
550
551         list_for_each(tmp, &dentry->d_subdirs)
552                 subdirs++;
553
554         CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
555                " flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
556                dentry->d_name.len, dentry->d_name.name,
557                dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
558                dentry->d_parent, dentry->d_inode, atomic_read(&dentry->d_count),
559                dentry->d_flags, dentry->d_fsdata, subdirs);
560         if (dentry->d_inode != NULL)
561                 ll_dump_inode(dentry->d_inode);
562
563         if (recur == 0)
564                 return;
565
566         list_for_each(tmp, &dentry->d_subdirs) {
567                 struct dentry *d = list_entry(tmp, struct dentry, d_child);
568                 lustre_dump_dentry(d, recur - 1);
569         }
570 }
571
572 #ifdef HAVE_EXPORT___IGET
573 static void prune_dir_dentries(struct inode *inode)
574 {
575         struct dentry *dentry, *prev = NULL;
576
577         /* due to Lustre-specific logic, a directory can have more than
578          * one dentry - a bug from the VFS point of view */
579 restart:
580         spin_lock(&dcache_lock);
581         if (!list_empty(&inode->i_dentry)) {
582                 dentry = list_entry(inode->i_dentry.prev,
583                                     struct dentry, d_alias);
584                 /* in order to prevent infinite loops, break out
585                  * if the previous dentry is still busy */
586                 if (dentry != prev) {
587                         prev = dentry;
588                         dget_locked(dentry);
589                         spin_unlock(&dcache_lock);
590
591                         /* try to kill all child dentries */
592                         lock_dentry(dentry);
593                         shrink_dcache_parent(dentry);
594                         unlock_dentry(dentry);
595                         dput(dentry);
596
597                         /* now try to get rid of current dentry */
598                         d_prune_aliases(inode);
599                         goto restart;
600                 }
601         }
602         spin_unlock(&dcache_lock);
603 }
604
605 static void prune_deathrow_one(struct ll_inode_info *lli)
606 {
607         struct inode *inode = ll_info2i(lli);
608
609         /* first, try to drop any dentries - they hold a ref on the inode */
610         if (S_ISDIR(inode->i_mode))
611                 prune_dir_dentries(inode);
612         else
613                 d_prune_aliases(inode);
614
615
616         /* if somebody still uses it, leave it */
617         LASSERT(atomic_read(&inode->i_count) > 0);
618         if (atomic_read(&inode->i_count) > 1)
619                 goto out;
620
621         CDEBUG(D_INODE, "inode %lu/%u(%d) looks like a good prune candidate\n",
622                inode->i_ino, inode->i_generation,
623                atomic_read(&inode->i_count));
624
625         /* seems nobody uses it anymore */
626         inode->i_nlink = 0;
627
628 out:
629         iput(inode);
630         return;
631 }
632
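/* Walk the sbi deathrow list, trying to drop dentries and the final inode
 * reference for each entry; with "try" set, back off if rescheduling is
 * needed or the deathrow lock is contended. */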
633 static void prune_deathrow(struct ll_sb_info *sbi, int try)
634 {
635         struct ll_inode_info *lli;
636         int empty;
637
638         do {
639                 if (need_resched() && try)
640                         break;
641
642                 if (try) {
643                         if (!cfs_spin_trylock(&sbi->ll_deathrow_lock))
644                                 break;
645                 } else {
646                         cfs_spin_lock(&sbi->ll_deathrow_lock);
647                 }
648
649                 empty = 1;
650                 lli = NULL;
651                 if (!cfs_list_empty(&sbi->ll_deathrow)) {
652                         lli = cfs_list_entry(sbi->ll_deathrow.next,
653                                              struct ll_inode_info,
654                                              lli_dead_list);
655                         cfs_list_del_init(&lli->lli_dead_list);
656                         if (!cfs_list_empty(&sbi->ll_deathrow))
657                                 empty = 0;
658                 }
659                 cfs_spin_unlock(&sbi->ll_deathrow_lock);
660
661                 if (lli)
662                         prune_deathrow_one(lli);
663
664         } while (empty == 0);
665 }
666 #else /* !HAVE_EXPORT___IGET */
667 #define prune_deathrow(sbi, try) do {} while (0)
668 #endif /* HAVE_EXPORT___IGET */
669
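/* Undo client_common_fill_super(): cancel unused locks, stop the close
 * thread, flush the deathrow, and disconnect the data and metadata exports. */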
670 void client_common_put_super(struct super_block *sb)
671 {
672         struct ll_sb_info *sbi = ll_s2sbi(sb);
673         ENTRY;
674
675 #ifdef CONFIG_FS_POSIX_ACL
676         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
677                 et_fini(&sbi->ll_et);
678                 rct_fini(&sbi->ll_rct);
679         }
680 #endif
681
682         obd_cancel_unused(sbi->ll_dt_exp, NULL, 0, NULL);
683
684         ll_close_thread_shutdown(sbi->ll_lcq);
685
686         cl_sb_fini(sb);
687
688         /* destroy inodes in deathrow */
689         prune_deathrow(sbi, 0);
690
691         cfs_list_del(&sbi->ll_conn_chain);
692
693         obd_fid_fini(sbi->ll_dt_exp);
694         obd_disconnect(sbi->ll_dt_exp);
695         sbi->ll_dt_exp = NULL;
696
697         lprocfs_unregister_mountpoint(sbi);
698
699         obd_fid_fini(sbi->ll_md_exp);
700         obd_disconnect(sbi->ll_md_exp);
701         sbi->ll_md_exp = NULL;
702
703         EXIT;
704 }
705
706 void ll_kill_super(struct super_block *sb)
707 {
708         struct ll_sb_info *sbi;
709
710         ENTRY;
711
712         /* superblock not initialized? */
713         if (!(sb->s_flags & MS_ACTIVE))
714                 return;
715
716         sbi = ll_s2sbi(sb);
717         /* we need to restore s_dev, which was changed for clustered NFS,
718          * before put_super because newer kernels cache s_dev and changing
719          * sb->s_dev in put_super no longer affects the real device */
720         if (sbi)
721                 sb->s_dev = sbi->ll_sdev_orig;
722         EXIT;
723 }
724
725 char *ll_read_opt(const char *opt, char *data)
726 {
727         char *value;
728         char *retval;
729         ENTRY;
730
731         CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
732         if (strncmp(opt, data, strlen(opt)))
733                 RETURN(NULL);
734         if ((value = strchr(data, '=')) == NULL)
735                 RETURN(NULL);
736
737         value++;
738         OBD_ALLOC(retval, strlen(value) + 1);
739         if (!retval) {
740                 CERROR("out of memory!\n");
741                 RETURN(NULL);
742         }
743
744         memcpy(retval, value, strlen(value)+1);
745         CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
746         RETURN(retval);
747 }
748
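/* Return fl if "data" begins with "opt", otherwise 0; for example,
 * ll_set_opt("flock", "flock,user_xattr", LL_SBI_FLOCK) yields LL_SBI_FLOCK. */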
749 static inline int ll_set_opt(const char *opt, char *data, int fl)
750 {
751         if (strncmp(opt, data, strlen(opt)) != 0)
752                 return(0);
753         else
754                 return(fl);
755 }
756
757 /* non-client-specific mount options are parsed in lmd_parse */
758 static int ll_options(char *options, int *flags)
759 {
760         int tmp;
761         char *s1 = options, *s2;
762         ENTRY;
763
764         if (!options)
765                 RETURN(0);
766
767         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
768
769         while (*s1) {
770                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
771                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
772                 if (tmp) {
773                         *flags |= tmp;
774                         goto next;
775                 }
776                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
777                 if (tmp) {
778                         *flags |= tmp;
779                         goto next;
780                 }
781                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
782                 if (tmp) {
783                         *flags |= tmp;
784                         goto next;
785                 }
786                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
787                 if (tmp) {
788                         *flags &= ~tmp;
789                         goto next;
790                 }
791                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
792                 if (tmp) {
793                         *flags |= tmp;
794                         goto next;
795                 }
796                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
797                 if (tmp) {
798                         *flags &= ~tmp;
799                         goto next;
800                 }
801                 tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
802                 if (tmp) {
803                         /* Ignore deprecated mount option.  The client will
804                          * always try to mount with ACL support; whether it
805                          * is used depends on whether the server supports it. */
806                         goto next;
807                 }
808                 tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
809                 if (tmp) {
810                         goto next;
811                 }
812                 tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
813                 if (tmp) {
814                         *flags |= tmp;
815                         goto next;
816                 }
817
818                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
819                 if (tmp) {
820                         *flags |= tmp;
821                         goto next;
822                 }
823                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
824                 if (tmp) {
825                         *flags &= ~tmp;
826                         goto next;
827                 }
828                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
829                 if (tmp) {
830                         *flags |= tmp;
831                         goto next;
832                 }
833                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
834                 if (tmp) {
835                         *flags &= ~tmp;
836                         goto next;
837                 }
838                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
839                 if (tmp) {
840                         *flags |= tmp;
841                         goto next;
842                 }
843                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
844                 if (tmp) {
845                         *flags &= ~tmp;
846                         goto next;
847                 }
848
849                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
850                                    s1);
851                 RETURN(-EINVAL);
852
853 next:
854                 /* Find next opt */
855                 s2 = strchr(s1, ',');
856                 if (s2 == NULL)
857                         break;
858                 s1 = s2 + 1;
859         }
860         RETURN(0);
861 }
862
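/* Initialize a freshly allocated ll_inode_info: magic, semaphores, list
 * heads and default maxbytes; no stripe data or open handles yet. */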
863 void ll_lli_init(struct ll_inode_info *lli)
864 {
865         lli->lli_inode_magic = LLI_INODE_MAGIC;
866         cfs_sema_init(&lli->lli_size_sem, 1);
867         cfs_sema_init(&lli->lli_write_sem, 1);
868         cfs_sema_init(&lli->lli_trunc_sem, 1);
869         lli->lli_flags = 0;
870         lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
871         cfs_spin_lock_init(&lli->lli_lock);
872         CFS_INIT_LIST_HEAD(&lli->lli_close_list);
873         lli->lli_inode_magic = LLI_INODE_MAGIC;
874         cfs_sema_init(&lli->lli_och_sem, 1);
875         lli->lli_mds_read_och = lli->lli_mds_write_och = NULL;
876         lli->lli_mds_exec_och = NULL;
877         lli->lli_open_fd_read_count = lli->lli_open_fd_write_count = 0;
878         lli->lli_open_fd_exec_count = 0;
879         CFS_INIT_LIST_HEAD(&lli->lli_dead_list);
880         lli->lli_remote_perms = NULL;
881         lli->lli_rmtperm_utime = 0;
882         cfs_sema_init(&lli->lli_rmtperm_sem, 1);
883         CFS_INIT_LIST_HEAD(&lli->lli_oss_capas);
884 }
885
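/* Client mount entry point: allocate the sbi, parse client mount options,
 * process the configuration log for this profile, and hand the resulting
 * MD and DT device names to client_common_fill_super(). */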
886 int ll_fill_super(struct super_block *sb)
887 {
888         struct lustre_profile *lprof;
889         struct lustre_sb_info *lsi = s2lsi(sb);
890         struct ll_sb_info *sbi;
891         char  *dt = NULL, *md = NULL;
892         char  *profilenm = get_profile_name(sb);
893         struct config_llog_instance cfg = {0, };
894         char   ll_instance[sizeof(sb) * 2 + 1];
895         int    err;
896         ENTRY;
897
898         CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
899
900         cfs_module_get();
901
902         /* client additional sb info */
903         lsi->lsi_llsbi = sbi = ll_init_sbi();
904         if (!sbi) {
905                 cfs_module_put(THIS_MODULE);
906                 RETURN(-ENOMEM);
907         }
908
909         err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
910         if (err)
911                 GOTO(out_free, err);
912
913         /* Generate a string unique to this super, in case some joker tries
914            to mount the same fs at two mount points.
915            Use the address of the super itself.*/
916         sprintf(ll_instance, "%p", sb);
917         cfg.cfg_instance = ll_instance;
918         cfg.cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
919
920         /* set up client obds */
921         err = lustre_process_log(sb, profilenm, &cfg);
922         if (err < 0) {
923                 CERROR("Unable to process log: %d\n", err);
924                 GOTO(out_free, err);
925         }
926
927         lprof = class_get_profile(profilenm);
928         if (lprof == NULL) {
929                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
930                                    " read from the MGS.  Does that filesystem "
931                                    "exist?\n", profilenm);
932                 GOTO(out_free, err = -EINVAL);
933         }
934         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
935                lprof->lp_md, lprof->lp_dt);
936
937         OBD_ALLOC(dt, strlen(lprof->lp_dt) +
938                   strlen(ll_instance) + 2);
939         if (!dt)
940                 GOTO(out_free, err = -ENOMEM);
941         sprintf(dt, "%s-%s", lprof->lp_dt, ll_instance);
942
943         OBD_ALLOC(md, strlen(lprof->lp_md) +
944                   strlen(ll_instance) + 2);
945         if (!md)
946                 GOTO(out_free, err = -ENOMEM);
947         sprintf(md, "%s-%s", lprof->lp_md, ll_instance);
948
949         /* connections, registrations, sb setup */
950         err = client_common_fill_super(sb, md, dt);
951
952 out_free:
953         if (md)
954                 OBD_FREE(md, strlen(md) + 1);
955         if (dt)
956                 OBD_FREE(dt, strlen(dt) + 1);
957         if (err)
958                 ll_put_super(sb);
959         else
960                 LCONSOLE_WARN("Client %s has started\n", profilenm);
961
962         RETURN(err);
963 } /* ll_fill_super */
964
965
966 void lu_context_keys_dump(void);
967
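/* Client unmount: end the config log watch, propagate obd_force, clean up
 * all obd devices in this superblock's group, and free the sbi. */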
968 void ll_put_super(struct super_block *sb)
969 {
970         struct config_llog_instance cfg;
971         char   ll_instance[sizeof(sb) * 2 + 1];
972         struct obd_device *obd;
973         struct lustre_sb_info *lsi = s2lsi(sb);
974         struct ll_sb_info *sbi = ll_s2sbi(sb);
975         char *profilenm = get_profile_name(sb);
976         int force = 1, next;
977         ENTRY;
978
979         CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
980
981         ll_print_capa_stat(sbi);
982
983         sprintf(ll_instance, "%p", sb);
984         cfg.cfg_instance = ll_instance;
985         lustre_end_log(sb, NULL, &cfg);
986
987         if (sbi->ll_md_exp) {
988                 obd = class_exp2obd(sbi->ll_md_exp);
989                 if (obd)
990                         force = obd->obd_force;
991         }
992
993         /* We need to set force before the lov_disconnect in
994            lustre_common_put_super, since l_d cleans up osc's as well. */
995         if (force) {
996                 next = 0;
997                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
998                                                      &next)) != NULL) {
999                         obd->obd_force = force;
1000                 }
1001         }
1002
1003         if (sbi->ll_lcq) {
1004                 /* Only if client_common_fill_super succeeded */
1005                 client_common_put_super(sb);
1006         }
1007
1008         next = 0;
1009         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) !=NULL) {
1010                 class_manual_cleanup(obd);
1011         }
1012
1013         if (profilenm)
1014                 class_del_profile(profilenm);
1015
1016         ll_free_sbi(sb);
1017         lsi->lsi_llsbi = NULL;
1018
1019         lustre_common_put_super(sb);
1020
1021         cl_env_cache_purge(~0);
1022
1023         LCONSOLE_WARN("client %s umount complete\n", ll_instance);
1024
1025         cfs_module_put(THIS_MODULE);
1026
1027         EXIT;
1028 } /* client_put_super */
1029
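/* Return an igrab()ed inode attached to the lock's l_ast_data, or NULL if
 * none (or a stale one) is attached; the caller must iput() the result. */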
1030 struct inode *ll_inode_from_lock(struct ldlm_lock *lock)
1031 {
1032         struct inode *inode = NULL;
1033         /* NOTE: we depend on atomic igrab() -bzzz */
1034         lock_res_and_lock(lock);
1035         if (lock->l_ast_data) {
1036                 struct ll_inode_info *lli = ll_i2info(lock->l_ast_data);
1037                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1038                         inode = igrab(lock->l_ast_data);
1039                 } else {
1040                         inode = lock->l_ast_data;
1041                         ldlm_lock_debug(NULL, inode->i_state & I_FREEING ?
1042                                                 D_INFO : D_WARNING,
1043                                         lock, __FILE__, __func__, __LINE__,
1044                                         "l_ast_data %p is bogus: magic %08x",
1045                                         lock->l_ast_data, lli->lli_inode_magic);
1046                         inode = NULL;
1047                 }
1048         }
1049         unlock_res_and_lock(lock);
1050         return inode;
1051 }
1052
1053 static int null_if_equal(struct ldlm_lock *lock, void *data)
1054 {
1055         if (data == lock->l_ast_data) {
1056                 lock->l_ast_data = NULL;
1057
1058                 if (lock->l_req_mode != lock->l_granted_mode)
1059                         LDLM_ERROR(lock,"clearing inode with ungranted lock");
1060         }
1061
1062         return LDLM_ITER_CONTINUE;
1063 }
1064
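/* Inode eviction: detach the inode from any DLM locks, close leftover MDS
 * open handles, release ACLs/remote perms/capas and free the stripe md. */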
1065 void ll_clear_inode(struct inode *inode)
1066 {
1067         struct ll_inode_info *lli = ll_i2info(inode);
1068         struct ll_sb_info *sbi = ll_i2sbi(inode);
1069         ENTRY;
1070
1071         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1072                inode->i_generation, inode);
1073
1074         if (S_ISDIR(inode->i_mode)) {
1075                 /* these should have been cleared in ll_file_release */
1076                 LASSERT(lli->lli_sai == NULL);
1077                 LASSERT(lli->lli_opendir_key == NULL);
1078                 LASSERT(lli->lli_opendir_pid == 0);
1079         }
1080
1081         ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1082         md_change_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
1083                          null_if_equal, inode);
1084
1085         LASSERT(!lli->lli_open_fd_write_count);
1086         LASSERT(!lli->lli_open_fd_read_count);
1087         LASSERT(!lli->lli_open_fd_exec_count);
1088
1089         if (lli->lli_mds_write_och)
1090                 ll_md_real_close(inode, FMODE_WRITE);
1091         if (lli->lli_mds_exec_och)
1092                 ll_md_real_close(inode, FMODE_EXEC);
1093         if (lli->lli_mds_read_och)
1094                 ll_md_real_close(inode, FMODE_READ);
1095
1096         if (lli->lli_symlink_name) {
1097                 OBD_FREE(lli->lli_symlink_name,
1098                          strlen(lli->lli_symlink_name) + 1);
1099                 lli->lli_symlink_name = NULL;
1100         }
1101
1102         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1103                 LASSERT(lli->lli_posix_acl == NULL);
1104                 if (lli->lli_remote_perms) {
1105                         free_rmtperm_hash(lli->lli_remote_perms);
1106                         lli->lli_remote_perms = NULL;
1107                 }
1108         }
1109 #ifdef CONFIG_FS_POSIX_ACL
1110         else if (lli->lli_posix_acl) {
1111                 LASSERT(cfs_atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
1112                 LASSERT(lli->lli_remote_perms == NULL);
1113                 posix_acl_release(lli->lli_posix_acl);
1114                 lli->lli_posix_acl = NULL;
1115         }
1116 #endif
1117         lli->lli_inode_magic = LLI_INODE_DEAD;
1118
1119 #ifdef HAVE_EXPORT___IGET
1120         cfs_spin_lock(&sbi->ll_deathrow_lock);
1121         cfs_list_del_init(&lli->lli_dead_list);
1122         cfs_spin_unlock(&sbi->ll_deathrow_lock);
1123 #endif
1124         ll_clear_inode_capas(inode);
1125         /*
1126          * XXX This has to be done before lsm is freed below, because
1127          * cl_object still uses inode lsm.
1128          */
1129         cl_inode_fini(inode);
1130
1131         if (lli->lli_smd) {
1132                 obd_free_memmd(sbi->ll_dt_exp, &lli->lli_smd);
1133                 lli->lli_smd = NULL;
1134         }
1135
1136
1137         EXIT;
1138 }
1139
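/* Send a setattr RPC to the MDS and apply the reply (timestamps, IO epoch
 * handle) to the local inode. */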
1140 int ll_md_setattr(struct inode *inode, struct md_op_data *op_data,
1141                   struct md_open_data **mod)
1142 {
1143         struct lustre_md md;
1144         struct ll_sb_info *sbi = ll_i2sbi(inode);
1145         struct ptlrpc_request *request = NULL;
1146         int rc;
1147         ENTRY;
1148
1149         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1150                                      LUSTRE_OPC_ANY, NULL);
1151         if (IS_ERR(op_data))
1152                 RETURN(PTR_ERR(op_data));
1153
1154         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
1155                         &request, mod);
1156         if (rc) {
1157                 ptlrpc_req_finished(request);
1158                 if (rc == -ENOENT) {
1159                         inode->i_nlink = 0;
1160                         /* Unlinked special device node? Or just a race?
1161                          * Pretend we did everything. */
1162                         if (!S_ISREG(inode->i_mode) &&
1163                             !S_ISDIR(inode->i_mode))
1164                                 rc = inode_setattr(inode, &op_data->op_attr);
1165                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1166                         CERROR("md_setattr fails: rc = %d\n", rc);
1167                 }
1168                 RETURN(rc);
1169         }
1170
1171         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1172                               sbi->ll_md_exp, &md);
1173         if (rc) {
1174                 ptlrpc_req_finished(request);
1175                 RETURN(rc);
1176         }
1177
1178         /* We call inode_setattr to adjust timestamps.
1179          * If there is at least some data in the file, we cleared ATTR_SIZE
1180          * above to avoid invoking vmtruncate, otherwise it is important
1181          * to call vmtruncate in inode_setattr to update inode->i_size
1182          * (bug 6196) */
1183         rc = inode_setattr(inode, &op_data->op_attr);
1184
1185         /* Extract epoch data if obtained. */
1186         op_data->op_handle = md.body->handle;
1187         op_data->op_ioepoch = md.body->ioepoch;
1188
1189         ll_update_inode(inode, &md);
1190         ptlrpc_req_finished(request);
1191
1192         RETURN(rc);
1193 }
1194
1195 /* Close IO epoch and send Size-on-MDS attribute update. */
1196 static int ll_setattr_done_writing(struct inode *inode,
1197                                    struct md_op_data *op_data,
1198                                    struct md_open_data *mod)
1199 {
1200         struct ll_inode_info *lli = ll_i2info(inode);
1201         int rc = 0;
1202         ENTRY;
1203
1204         LASSERT(op_data != NULL);
1205         if (!S_ISREG(inode->i_mode))
1206                 RETURN(0);
1207
1208         CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID" for truncate\n",
1209                op_data->op_ioepoch, PFID(&lli->lli_fid));
1210
1211         op_data->op_flags = MF_EPOCH_CLOSE;
1212         ll_done_writing_attr(inode, op_data);
1213         ll_pack_inode2opdata(inode, op_data, NULL);
1214
1215         rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1216         if (rc == -EAGAIN) {
1217                 /* MDS has instructed us to obtain Size-on-MDS attribute
1218                  * from the OSTs and send a setattr back to the MDS. */
1219                 rc = ll_som_update(inode, op_data);
1220         } else if (rc) {
1221                 CERROR("inode %lu mdc truncate failed: rc = %d\n",
1222                        inode->i_ino, rc);
1223         }
1224         RETURN(rc);
1225 }
1226
1227 static int ll_setattr_do_truncate(struct inode *inode, loff_t size)
1228 {
1229         struct obd_capa *capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
1230         int rc;
1231
1232         rc = cl_setattr_do_truncate(inode, size, capa);
1233         ll_truncate_free_capa(capa);
1234         return rc;
1235 }
1236
1237 static int ll_setattr_ost(struct inode *inode)
1238 {
1239         struct obd_capa *capa = ll_mdscapa_get(inode);
1240         int rc;
1241
1242         rc = cl_setattr_ost(inode, capa);
1243         capa_put(capa);
1244
1245         return rc;
1246 }
1247
1248 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1249  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1250  * keep these values until such a time that objects are allocated for it.
1251  * We do the MDS operations first, as it is checking permissions for us.
1252  * We skip the MDS RPC if there is nothing we want to store there;
1253  * otherwise there is no harm in updating mtime/atime on the MDS if we are
1254  * going to do an RPC anyway.
1255  *
1256  * If we are doing a truncate, we will send the mtime and ctime updates
1257  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1258  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1259  * at the same time.
1260  */
1261 int ll_setattr_raw(struct inode *inode, struct iattr *attr)
1262 {
1263         struct ll_inode_info *lli = ll_i2info(inode);
1264         struct lov_stripe_md *lsm = lli->lli_smd;
1265         struct md_op_data *op_data = NULL;
1266         struct md_open_data *mod = NULL;
1267         int ia_valid = attr->ia_valid;
1268         int rc = 0, rc1 = 0;
1269         ENTRY;
1270
1271         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu valid %x\n", inode->i_ino,
1272                attr->ia_valid);
1273         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETATTR, 1);
1274
1275         if (ia_valid & ATTR_SIZE) {
1276                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1277                         CDEBUG(D_INODE, "file too large %llu > "LPU64"\n",
1278                                attr->ia_size, ll_file_maxbytes(inode));
1279                         RETURN(-EFBIG);
1280                 }
1281
1282                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1283         }
1284
1285         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1286         if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
1287                 if (cfs_curproc_fsuid() != inode->i_uid &&
1288                     !cfs_capable(CFS_CAP_FOWNER))
1289                         RETURN(-EPERM);
1290         }
1291
1292         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1293         if (attr->ia_valid & ATTR_CTIME) {
1294                 attr->ia_ctime = CFS_CURRENT_TIME;
1295                 attr->ia_valid |= ATTR_CTIME_SET;
1296         }
1297         if (!(ia_valid & ATTR_ATIME_SET) && (attr->ia_valid & ATTR_ATIME)) {
1298                 attr->ia_atime = CFS_CURRENT_TIME;
1299                 attr->ia_valid |= ATTR_ATIME_SET;
1300         }
1301         if (!(ia_valid & ATTR_MTIME_SET) && (attr->ia_valid & ATTR_MTIME)) {
1302                 attr->ia_mtime = CFS_CURRENT_TIME;
1303                 attr->ia_valid |= ATTR_MTIME_SET;
1304         }
1305         if ((attr->ia_valid & ATTR_CTIME) && !(attr->ia_valid & ATTR_MTIME)) {
1306                 /* To avoid a stale mtime on the MDS, obtain it from the
1307                    OST and send it to the MDS. */
1308                 rc = cl_glimpse_size(inode);
1309                 if (rc)
1310                         RETURN(rc);
1311
1312                 attr->ia_valid |= ATTR_MTIME_SET | ATTR_MTIME;
1313                 attr->ia_mtime = inode->i_mtime;
1314         }
1315
1316         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1317                 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
1318                        LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1319                        cfs_time_current_sec());
1320
1321         /* NB: ATTR_SIZE will only be set after this point if the size
1322          * resides on the MDS, ie, this file has no objects. */
1323         if (lsm)
1324                 attr->ia_valid &= ~ATTR_SIZE;
1325
1326         /* We always do an MDS RPC, even if we're only changing the size;
1327          * only the MDS knows whether truncate() should fail with -ETXTBUSY */
1328
1329         OBD_ALLOC_PTR(op_data);
1330         if (op_data == NULL)
1331                 RETURN(-ENOMEM);
1332
1333         UNLOCK_INODE_MUTEX(inode);
1334         if (ia_valid & ATTR_SIZE)
1335                 UP_WRITE_I_ALLOC_SEM(inode);
1336         cfs_down(&lli->lli_trunc_sem);
1337         LOCK_INODE_MUTEX(inode);
1338         if (ia_valid & ATTR_SIZE)
1339                 DOWN_WRITE_I_ALLOC_SEM(inode);
1340
1341         memcpy(&op_data->op_attr, attr, sizeof(*attr));
1342
1343         /* Open epoch for truncate. */
1344         if (exp_connect_som(ll_i2mdexp(inode)) && (ia_valid & ATTR_SIZE))
1345                 op_data->op_flags = MF_EPOCH_OPEN;
1346
1347         rc = ll_md_setattr(inode, op_data, &mod);
1348         if (rc)
1349                 GOTO(out, rc);
1350
1351         ll_ioepoch_open(lli, op_data->op_ioepoch);
1352         if (!lsm || !S_ISREG(inode->i_mode)) {
1353                 CDEBUG(D_INODE, "no lsm: not setting attrs on OST\n");
1354                 GOTO(out, rc = 0);
1355         }
1356
1357         if (ia_valid & ATTR_SIZE)
1358                 rc = ll_setattr_do_truncate(inode, attr->ia_size);
1359         else if (ia_valid & (ATTR_MTIME | ATTR_MTIME_SET)) {
1360                 CDEBUG(D_INODE, "set mtime on OST inode %lu to %lu\n",
1361                        inode->i_ino, LTIME_S(attr->ia_mtime));
1362                 rc = ll_setattr_ost(inode);
1363         }
1364         EXIT;
1365 out:
1366         if (op_data) {
1367                 if (op_data->op_ioepoch)
1368                         rc1 = ll_setattr_done_writing(inode, op_data, mod);
1369                 ll_finish_md_op_data(op_data);
1370         }
1371         cfs_up(&lli->lli_trunc_sem);
1372         return rc ? rc : rc1;
1373 }
1374
1375 int ll_setattr(struct dentry *de, struct iattr *attr)
1376 {
1377         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1378             (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1379                 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1380
1381         if ((de->d_inode->i_mode & S_ISUID) &&
1382             !(attr->ia_mode & S_ISUID) &&
1383             !(attr->ia_valid & ATTR_KILL_SUID))
1384                 attr->ia_valid |= ATTR_KILL_SUID;
1385
1386         if (((de->d_inode->i_mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1387             !(attr->ia_mode & S_ISGID) &&
1388             !(attr->ia_valid & ATTR_KILL_SGID))
1389                 attr->ia_valid |= ATTR_KILL_SGID;
1390
1391         return ll_setattr_raw(de->d_inode, attr);
1392 }
1393
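/* Aggregate statfs: inode counts come from the MDS, block counts from the
 * OSTs; total inodes are reduced if the OSTs have fewer free objects. */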
1394 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1395                        __u64 max_age, __u32 flags)
1396 {
1397         struct ll_sb_info *sbi = ll_s2sbi(sb);
1398         struct obd_statfs obd_osfs;
1399         int rc;
1400         ENTRY;
1401
1402         rc = obd_statfs(class_exp2obd(sbi->ll_md_exp), osfs, max_age, flags);
1403         if (rc) {
1404                 CERROR("md_statfs fails: rc = %d\n", rc);
1405                 RETURN(rc);
1406         }
1407
1408         osfs->os_type = sb->s_magic;
1409
1410         CDEBUG(D_SUPER, "MDC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1411                osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,osfs->os_files);
1412
1413         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
1414                 flags |= OBD_STATFS_NODELAY;
1415
1416         rc = obd_statfs_rqset(class_exp2obd(sbi->ll_dt_exp),
1417                               &obd_osfs, max_age, flags);
1418         if (rc) {
1419                 CERROR("obd_statfs fails: rc = %d\n", rc);
1420                 RETURN(rc);
1421         }
1422
1423         CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1424                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1425                obd_osfs.os_files);
1426
1427         osfs->os_bsize = obd_osfs.os_bsize;
1428         osfs->os_blocks = obd_osfs.os_blocks;
1429         osfs->os_bfree = obd_osfs.os_bfree;
1430         osfs->os_bavail = obd_osfs.os_bavail;
1431
1432         /* If we don't have as many objects free on the OST as inodes
1433          * on the MDS, we reduce the total number of inodes to
1434          * compensate, so that the "inodes in use" number is correct.
1435          */
1436         if (obd_osfs.os_ffree < osfs->os_ffree) {
1437                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1438                         obd_osfs.os_ffree;
1439                 osfs->os_ffree = obd_osfs.os_ffree;
1440         }
1441
1442         RETURN(rc);
1443 }
1444 #ifndef HAVE_STATFS_DENTRY_PARAM
1445 int ll_statfs(struct super_block *sb, struct kstatfs *sfs)
1446 {
1447 #else
1448 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1449 {
1450         struct super_block *sb = de->d_sb;
1451 #endif
1452         struct obd_statfs osfs;
1453         int rc;
1454
1455         CDEBUG(D_VFSTRACE, "VFS Op: at "LPU64" jiffies\n", get_jiffies_64());
1456         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1457
1458         /* For now we will always get up-to-date statfs values, but in the
1459          * future we may allow some amount of caching on the client (e.g.
1460          * from QOS or lprocfs updates). */
1461         rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - 1, 0);
1462         if (rc)
1463                 return rc;
1464
1465         statfs_unpack(sfs, &osfs);
1466
1467         /* We need to downshift for all 32-bit kernels, because we can't
1468          * tell if the kernel is being called via sys_statfs64() or not.
1469          * Stop before overflowing f_bsize - in which case it is better
1470          * to just risk EOVERFLOW if caller is using old sys_statfs(). */
1471         if (sizeof(long) < 8) {
1472                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1473                         sfs->f_bsize <<= 1;
1474
1475                         osfs.os_blocks >>= 1;
1476                         osfs.os_bfree >>= 1;
1477                         osfs.os_bavail >>= 1;
1478                 }
1479         }
1480
1481         sfs->f_blocks = osfs.os_blocks;
1482         sfs->f_bfree = osfs.os_bfree;
1483         sfs->f_bavail = osfs.os_bavail;
1484
1485         return 0;
1486 }
1487
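/* Serialize i_size updates through lli_size_sem.  When lock_lsm is non-zero
 * the LOV stripe lock is taken as well; callers must pass lock_lsm == 0 while
 * lli_smd may still be NULL (see the bug 9547 note in ll_update_inode()). */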
1488 void ll_inode_size_lock(struct inode *inode, int lock_lsm)
1489 {
1490         struct ll_inode_info *lli;
1491         struct lov_stripe_md *lsm;
1492
1493         lli = ll_i2info(inode);
1494         LASSERT(lli->lli_size_sem_owner != current);
1495         cfs_down(&lli->lli_size_sem);
1496         LASSERT(lli->lli_size_sem_owner == NULL);
1497         lli->lli_size_sem_owner = current;
1498         lsm = lli->lli_smd;
1499         LASSERTF(lsm != NULL || lock_lsm == 0, "lsm %p, lock_lsm %d\n",
1500                  lsm, lock_lsm);
1501         if (lock_lsm)
1502                 lov_stripe_lock(lsm);
1503 }
1504
1505 void ll_inode_size_unlock(struct inode *inode, int unlock_lsm)
1506 {
1507         struct ll_inode_info *lli;
1508         struct lov_stripe_md *lsm;
1509
1510         lli = ll_i2info(inode);
1511         lsm = lli->lli_smd;
1512         LASSERTF(lsm != NULL || unlock_lsm == 0, "lsm %p, unlock_lsm %d\n",
1513                  lsm, unlock_lsm);
1514         if (unlock_lsm)
1515                 lov_stripe_unlock(lsm);
1516         LASSERT(lli->lli_size_sem_owner == current);
1517         lli->lli_size_sem_owner = NULL;
1518         cfs_up(&lli->lli_size_sem);
1519 }
1520
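/* Refresh the in-core inode from the metadata in @md: install or verify the
 * LOV stripe descriptor, then apply every attribute the MDS marked valid in
 * the mdt_body (times, mode, uid/gid, nlink, rdev, size, capabilities). */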
1521 void ll_update_inode(struct inode *inode, struct lustre_md *md)
1522 {
1523         struct ll_inode_info *lli = ll_i2info(inode);
1524         struct mdt_body *body = md->body;
1525         struct lov_stripe_md *lsm = md->lsm;
1526         struct ll_sb_info *sbi = ll_i2sbi(inode);
1527
1528         LASSERT ((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
1529         if (lsm != NULL) {
1530                 if (lli->lli_smd == NULL) {
1531                         if (lsm->lsm_magic != LOV_MAGIC_V1 &&
1532                             lsm->lsm_magic != LOV_MAGIC_V3) {
1533                                 dump_lsm(D_ERROR, lsm);
1534                                 LBUG();
1535                         }
1536                         CDEBUG(D_INODE, "adding lsm %p to inode %lu/%u(%p)\n",
1537                                lsm, inode->i_ino, inode->i_generation, inode);
1538                         cl_inode_init(inode, md);
1539                         /* ll_inode_size_lock() requires that it only be
1540                          * called with lli_smd != NULL or lock_lsm == 0,
1541                          * otherwise we can race between lock/unlock.
1542                          * bug 9547 */
1543                         lli->lli_smd = lsm;
1544                         lli->lli_maxbytes = lsm->lsm_maxbytes;
1545                         if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
1546                                 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
1547                 } else {
1548                         LASSERT(lli->lli_smd->lsm_magic == lsm->lsm_magic &&
1549                                 lli->lli_smd->lsm_stripe_count ==
1550                                 lsm->lsm_stripe_count);
1551                         if (lov_stripe_md_cmp(lli->lli_smd, lsm)) {
1552                                 CERROR("lsm mismatch for inode %lu\n",
1553                                        inode->i_ino);
1554                                 CERROR("lli_smd:\n");
1555                                 dump_lsm(D_ERROR, lli->lli_smd);
1556                                 CERROR("lsm:\n");
1557                                 dump_lsm(D_ERROR, lsm);
1558                                 LBUG();
1559                         }
1560                 }
1561                 if (lli->lli_smd != lsm)
1562                         obd_free_memmd(ll_i2dtexp(inode), &lsm);
1563         }
1564
1565         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1566                 if (body->valid & OBD_MD_FLRMTPERM)
1567                         ll_update_remote_perm(inode, md->remote_perm);
1568         }
1569 #ifdef CONFIG_FS_POSIX_ACL
1570         else if (body->valid & OBD_MD_FLACL) {
1571                 cfs_spin_lock(&lli->lli_lock);
1572                 if (lli->lli_posix_acl)
1573                         posix_acl_release(lli->lli_posix_acl);
1574                 lli->lli_posix_acl = md->posix_acl;
1575                 cfs_spin_unlock(&lli->lli_lock);
1576         }
1577 #endif
1578         inode->i_ino = cl_fid_build_ino(&body->fid1);
1579         inode->i_generation = cl_fid_build_gen(&body->fid1);
1580
1581         if (body->valid & OBD_MD_FLATIME &&
1582             body->atime > LTIME_S(inode->i_atime))
1583                 LTIME_S(inode->i_atime) = body->atime;
1584
1585         /* mtime is always updated along with ctime, but it can be set in
1586            the past.  Since a write and a utime(2) may happen within one
1587            second, and utime's mtime takes priority over write's, take the
1588            mtime from the MDS when the ctimes are equal. */
1589         if (body->valid & OBD_MD_FLCTIME &&
1590             body->ctime >= LTIME_S(inode->i_ctime)) {
1591                 LTIME_S(inode->i_ctime) = body->ctime;
1592                 if (body->valid & OBD_MD_FLMTIME) {
1593                         CDEBUG(D_INODE, "setting ino %lu mtime "
1594                                "from %lu to "LPU64"\n", inode->i_ino,
1595                                LTIME_S(inode->i_mtime), body->mtime);
1596                         LTIME_S(inode->i_mtime) = body->mtime;
1597                 }
1598         }
1599         if (body->valid & OBD_MD_FLMODE)
1600                 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
1601         if (body->valid & OBD_MD_FLTYPE)
1602                 inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
1603         LASSERT(inode->i_mode != 0);
1604         if (S_ISREG(inode->i_mode)) {
1605                 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
1606         } else {
1607                 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1608         }
1609 #ifdef HAVE_INODE_BLKSIZE
1610         inode->i_blksize = 1<<inode->i_blkbits;
1611 #endif
1612         if (body->valid & OBD_MD_FLUID)
1613                 inode->i_uid = body->uid;
1614         if (body->valid & OBD_MD_FLGID)
1615                 inode->i_gid = body->gid;
1616         if (body->valid & OBD_MD_FLFLAGS)
1617                 inode->i_flags = ll_ext_to_inode_flags(body->flags);
1618         if (body->valid & OBD_MD_FLNLINK)
1619                 inode->i_nlink = body->nlink;
1620         if (body->valid & OBD_MD_FLRDEV)
1621                 inode->i_rdev = old_decode_dev(body->rdev);
1622
1623         if (body->valid & OBD_MD_FLID) {
1624                 /* FID shouldn't be changed! */
1625                 if (fid_is_sane(&lli->lli_fid)) {
1626                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
1627                                  "Trying to change FID "DFID
1628                                  " to the "DFID", inode %lu/%u(%p)\n",
1629                                  PFID(&lli->lli_fid), PFID(&body->fid1),
1630                                  inode->i_ino, inode->i_generation, inode);
1631                 } else
1632                         lli->lli_fid = body->fid1;
1633         }
1634
1635         LASSERT(fid_seq(&lli->lli_fid) != 0);
1636
1637         if (body->valid & OBD_MD_FLSIZE) {
1638                 if (exp_connect_som(ll_i2mdexp(inode)) &&
1639                     S_ISREG(inode->i_mode) && lli->lli_smd) {
1640                         struct lustre_handle lockh;
1641                         ldlm_mode_t mode;
1642
1643                         /* Since a blocking AST may have been processed by
1644                          * this time, check that an UPDATE lock is still held
1645                          * on the client, and set LLIF_MDS_SIZE_LOCK while
1646                          * holding it. */
1647                         mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1648                                                &lockh);
1649                         if (mode) {
1650                                 if (lli->lli_flags & (LLIF_DONE_WRITING |
1651                                                       LLIF_EPOCH_PENDING |
1652                                                       LLIF_SOM_DIRTY)) {
1653                                         CERROR("ino %lu flags %lu still has "
1654                                                "size authority! Do not trust "
1655                                                "the size from the MDS\n",
1656                                                inode->i_ino, lli->lli_flags);
1657                                 } else {
1658                                         /* Use old size assignment to avoid
1659                                          * deadlock bz14138 & bz14326 */
1660                                         inode->i_size = body->size;
1661                                         lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1662                                 }
1663                                 ldlm_lock_decref(&lockh, mode);
1664                         }
1665                 } else {
1666                         /* Use old size assignment to avoid
1667                          * deadlock bz14138 & bz14326 */
1668                         inode->i_size = body->size;
1669                 }
1670
1671                 if (body->valid & OBD_MD_FLBLOCKS)
1672                         inode->i_blocks = body->blocks;
1673         }
1674
1675         if (body->valid & OBD_MD_FLMDSCAPA) {
1676                 LASSERT(md->mds_capa);
1677                 ll_add_capa(inode, md->mds_capa);
1678         }
1679         if (body->valid & OBD_MD_FLOSSCAPA) {
1680                 LASSERT(md->oss_capa);
1681                 ll_add_capa(inode, md->oss_capa);
1682         }
1683 }
1684
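/* Minimal backing_dev_info used for special inodes (devices, fifos, sockets)
 * below: no readahead, and pages still count towards dirty memory. */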
1685 static struct backing_dev_info ll_backing_dev_info = {
1686         .ra_pages       = 0,    /* No readahead */
1687 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
1688         .capabilities   = 0,    /* Does contribute to dirty memory */
1689 #else
1690         .memory_backed  = 0,    /* Does contribute to dirty memory */
1691 #endif
1692 };
1693
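/* Initialize a newly allocated inode from the lustre_md passed in @opaque
 * (e.g. by ll_iget()) and wire up the inode, file and address-space
 * operations according to the file type. */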
1694 void ll_read_inode2(struct inode *inode, void *opaque)
1695 {
1696         struct lustre_md *md = opaque;
1697         struct ll_inode_info *lli = ll_i2info(inode);
1698         ENTRY;
1699
1700         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
1701                inode->i_ino, inode->i_generation, inode);
1702
1703         ll_lli_init(lli);
1704
1705         LASSERT(!lli->lli_smd);
1706
1707         /* Core attributes from the MDS first.  This is a new inode, and
1708          * the VFS doesn't zero times in the core inode so we have to do
1709          * it ourselves.  They will be overwritten by either MDS or OST
1710          * attributes - we just need to make sure they aren't newer. */
1711         LTIME_S(inode->i_mtime) = 0;
1712         LTIME_S(inode->i_atime) = 0;
1713         LTIME_S(inode->i_ctime) = 0;
1714         inode->i_rdev = 0;
1715         ll_update_inode(inode, md);
1716
1717         /* OIDEBUG(inode); */
1718
1719         if (S_ISREG(inode->i_mode)) {
1720                 struct ll_sb_info *sbi = ll_i2sbi(inode);
1721                 inode->i_op = &ll_file_inode_operations;
1722                 inode->i_fop = sbi->ll_fop;
1723                 inode->i_mapping->a_ops = &ll_aops;
1724                 EXIT;
1725         } else if (S_ISDIR(inode->i_mode)) {
1726                 inode->i_op = &ll_dir_inode_operations;
1727                 inode->i_fop = &ll_dir_operations;
1728                 inode->i_mapping->a_ops = &ll_dir_aops;
1729                 EXIT;
1730         } else if (S_ISLNK(inode->i_mode)) {
1731                 inode->i_op = &ll_fast_symlink_inode_operations;
1732                 EXIT;
1733         } else {
1734                 inode->i_op = &ll_special_inode_operations;
1735
1736                 init_special_inode(inode, inode->i_mode,
1737                                    kdev_t_to_nr(inode->i_rdev));
1738
1739                 /* Initialize the backing device info. */
1740                 inode->i_mapping->backing_dev_info = &ll_backing_dev_info;
1741
1742                 EXIT;
1743         }
1744 }
1745
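/* Final iput of an inode: tell the MD layer to forget the FID, then drop any
 * cached pages and clear the inode. */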
1746 void ll_delete_inode(struct inode *inode)
1747 {
1748         struct ll_sb_info *sbi = ll_i2sbi(inode);
1749         int rc;
1750         ENTRY;
1751
1752         rc = obd_fid_delete(sbi->ll_md_exp, ll_inode2fid(inode));
1753         if (rc) {
1754                 CERROR("fid_delete() failed, rc %d\n", rc);
1755         }
1756         truncate_inode_pages(&inode->i_data, 0);
1757         clear_inode(inode);
1758
1759         EXIT;
1760 }
1761
1762 int ll_iocontrol(struct inode *inode, struct file *file,
1763                  unsigned int cmd, unsigned long arg)
1764 {
1765         struct ll_sb_info *sbi = ll_i2sbi(inode);
1766         struct ptlrpc_request *req = NULL;
1767         int rc, flags = 0;
1768         ENTRY;
1769
1770         switch(cmd) {
1771         case FSFILT_IOC_GETFLAGS: {
1772                 struct mdt_body *body;
1773                 struct obd_capa *oc;
1774
1775                 oc = ll_mdscapa_get(inode);
1776                 rc = md_getattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
1777                                 OBD_MD_FLFLAGS, 0, &req);
1778                 capa_put(oc);
1779                 if (rc) {
1780                         CERROR("failure %d inode %lu\n", rc, inode->i_ino);
1781                         RETURN(-abs(rc));
1782                 }
1783
1784                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1785
1786                 flags = body->flags;
1787
1788                 ptlrpc_req_finished(req);
1789
1790                 RETURN(put_user(flags, (int *)arg));
1791         }
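        /* Setting flags is a two-step operation: first push the new flags to
         * the MDS via md_setattr(), then, if the file has OST objects,
         * propagate them to the OSTs with obd_setattr_rqset() before updating
         * the cached i_flags. */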
1792         case FSFILT_IOC_SETFLAGS: {
1793                 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1794                 struct obd_info oinfo = { { { 0 } } };
1795                 struct md_op_data *op_data;
1796
1797                 if (get_user(flags, (int *)arg))
1798                         RETURN(-EFAULT);
1799
1800                 oinfo.oi_md = lsm;
1801                 OBDO_ALLOC(oinfo.oi_oa);
1802                 if (!oinfo.oi_oa)
1803                         RETURN(-ENOMEM);
1804
1805                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1806                                              LUSTRE_OPC_ANY, NULL);
1807                 if (IS_ERR(op_data))
1808                         RETURN(PTR_ERR(op_data));
1809
1810                 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
1811                 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1812                 rc = md_setattr(sbi->ll_md_exp, op_data,
1813                                 NULL, 0, NULL, 0, &req, NULL);
1814                 ll_finish_md_op_data(op_data);
1815                 ptlrpc_req_finished(req);
1816                 if (rc) {
1817                         OBDO_FREE(oinfo.oi_oa);
1818                         RETURN(rc);
1819                 }
1820
1821                 if (lsm == NULL) {
1822                         OBDO_FREE(oinfo.oi_oa);
1823                         GOTO(update_cache, rc);
1824                 }
1825
1826                 oinfo.oi_oa->o_id = lsm->lsm_object_id;
1827                 oinfo.oi_oa->o_gr = lsm->lsm_object_gr;
1828                 oinfo.oi_oa->o_flags = flags;
1829                 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
1830                                        OBD_MD_FLGROUP;
1831                 oinfo.oi_capa = ll_mdscapa_get(inode);
1832
1833                 obdo_from_inode(oinfo.oi_oa, inode,
1834                                 OBD_MD_FLFID | OBD_MD_FLGENER);
1835                 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1836                 capa_put(oinfo.oi_capa);
1837                 OBDO_FREE(oinfo.oi_oa);
1838                 if (rc) {
1839                         if (rc != -EPERM && rc != -EACCES)
1840                                 CERROR("osc_setattr_async fails: rc = %d\n", rc);
1841                         RETURN(rc);
1842                 }
1843
1844                 EXIT;
1845 update_cache:
1846                 inode->i_flags = ll_ext_to_inode_flags(flags |
1847                                                        MDS_BFLAG_EXT_FLAGS);
1848                 return 0;
1849         }
1850         default:
1851                 RETURN(-ENOSYS);
1852         }
1853
1854         RETURN(0);
1855 }
1856
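/* Flush the security contexts of the current user on both the metadata (MDC)
 * and data (OSC/LOV) exports via the KEY_FLUSH_CTX key. */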
1857 int ll_flush_ctx(struct inode *inode)
1858 {
1859         struct ll_sb_info  *sbi = ll_i2sbi(inode);
1860
1861         CDEBUG(D_SEC, "flush context for user %d\n", cfs_curproc_uid());
1862
1863         obd_set_info_async(sbi->ll_md_exp,
1864                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1865                            0, NULL, NULL);
1866         obd_set_info_async(sbi->ll_dt_exp,
1867                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1868                            0, NULL, NULL);
1869         return 0;
1870 }
1871
1872 /* umount -f client means force down, don't save state */
1873 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
1874 void ll_umount_begin(struct vfsmount *vfsmnt, int flags)
1875 {
1876         struct super_block *sb = vfsmnt->mnt_sb;
1877 #else
1878 void ll_umount_begin(struct super_block *sb)
1879 {
1880 #endif
1881         struct lustre_sb_info *lsi = s2lsi(sb);
1882         struct ll_sb_info *sbi = ll_s2sbi(sb);
1883         struct obd_device *obd;
1884         struct obd_ioctl_data *ioc_data;
1885         ENTRY;
1886
1887 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
1888         if (!(flags & MNT_FORCE)) {
1889                 EXIT;
1890                 return;
1891         }
1892 #endif
1893
1894         /* Tell the MGC we got umount -f */
1895         lsi->lsi_flags |= LSI_UMOUNT_FORCE;
1896
1897         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
1898                sb->s_count, atomic_read(&sb->s_active));
1899
1900         obd = class_exp2obd(sbi->ll_md_exp);
1901         if (obd == NULL) {
1902                 CERROR("Invalid MDC connection handle "LPX64"\n",
1903                        sbi->ll_md_exp->exp_handle.h_cookie);
1904                 EXIT;
1905                 return;
1906         }
1907         obd->obd_force = 1;
1908
1909         obd = class_exp2obd(sbi->ll_dt_exp);
1910         if (obd == NULL) {
1911                 CERROR("Invalid LOV connection handle "LPX64"\n",
1912                        sbi->ll_dt_exp->exp_handle.h_cookie);
1913                 EXIT;
1914                 return;
1915         }
1916         obd->obd_force = 1;
1917
1918         OBD_ALLOC_PTR(ioc_data);
1919         if (ioc_data) {
1920                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
1921                               sizeof ioc_data, ioc_data, NULL);
1922
1923                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
1924                               sizeof ioc_data, ioc_data, NULL);
1925
1926                 OBD_FREE_PTR(ioc_data);
1927         }
1928
1929
1930         /* Really, we'd like to wait until there are no requests outstanding,
1931          * and then continue.  For now, we just invalidate the requests,
1932          * schedule() and sleep one second if needed, and hope.
1933          */
1934         cfs_schedule();
1935 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
1936         if (atomic_read(&vfsmnt->mnt_count) > 2) {
1937                 cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
1938                                                    cfs_time_seconds(1));
1939                 if (atomic_read(&vfsmnt->mnt_count) > 2)
1940                         LCONSOLE_WARN("Mount still busy with %d refs! You "
1941                                       "may try to umount it a bit later\n",
1942                                       atomic_read(&vfsmnt->mnt_count));
1943         }
1944 #endif
1945
1946         EXIT;
1947 }
1948
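/* Handle "mount -o remount,ro" / ",rw": tell the MDS about the new read-only
 * state through KEY_READ_ONLY and mirror the change in sb->s_flags. */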
1949 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
1950 {
1951         struct ll_sb_info *sbi = ll_s2sbi(sb);
1952         int err;
1953         __u32 read_only;
1954
1955         if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
1956                 read_only = *flags & MS_RDONLY;
1957                 err = obd_set_info_async(sbi->ll_md_exp,
1958                                          sizeof(KEY_READ_ONLY),
1959                                          KEY_READ_ONLY, sizeof(read_only),
1960                                          &read_only, NULL);
1961                 if (err) {
1962                         CERROR("Failed to change the read-only flag during "
1963                                "remount: %d\n", err);
1964                         return err;
1965                 }
1966
1967                 if (read_only)
1968                         sb->s_flags |= MS_RDONLY;
1969                 else
1970                         sb->s_flags &= ~MS_RDONLY;
1971         }
1972         return 0;
1973 }
1974
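/* Unpack the lustre_md from an RPC reply and either refresh an existing inode
 * (*inode != NULL) or instantiate a new one on @sb via ll_iget(). */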
1975 int ll_prep_inode(struct inode **inode,
1976                   struct ptlrpc_request *req,
1977                   struct super_block *sb)
1978 {
1979         struct ll_sb_info *sbi = NULL;
1980         struct lustre_md md;
1981         int rc;
1982         ENTRY;
1983
1984         LASSERT(*inode || sb);
1985         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
1986         prune_deathrow(sbi, 1);
1987         memset(&md, 0, sizeof(struct lustre_md));
1988
1989         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
1990                               sbi->ll_md_exp, &md);
1991         if (rc)
1992                 RETURN(rc);
1993
1994         if (*inode) {
1995                 ll_update_inode(*inode, &md);
1996         } else {
1997                 LASSERT(sb != NULL);
1998
1999                 /*
2000                  * At this point the server returns the same FID that the
2001                  * client generated for the create, so using ->fid1 is okay here.
2002                  */
2003                 LASSERT(fid_is_sane(&md.body->fid1));
2004
2005                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1), &md);
2006                 if (*inode == NULL || IS_ERR(*inode)) {
2007                         if (md.lsm)
2008                                 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2009 #ifdef CONFIG_FS_POSIX_ACL
2010                         if (md.posix_acl) {
2011                                 posix_acl_release(md.posix_acl);
2012                                 md.posix_acl = NULL;
2013                         }
2014 #endif
2015                         rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
2016                         *inode = NULL;
2017                         CERROR("new_inode - fatal: rc %d\n", rc);
2018                         GOTO(out, rc);
2019                 }
2020         }
2021
2022 out:
2023         md_free_lustre_md(sbi->ll_md_exp, &md);
2024         RETURN(rc);
2025 }
2026
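/* IOC_OBD_STATFS handler: copy the ioctl data from user space and forward the
 * statfs request to the MDC or LOV export selected by the LL_STATFS_* type in
 * ioc_inlbuf1. */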
2027 int ll_obd_statfs(struct inode *inode, void *arg)
2028 {
2029         struct ll_sb_info *sbi = NULL;
2030         struct obd_export *exp;
2031         char *buf = NULL;
2032         struct obd_ioctl_data *data = NULL;
2033         __u32 type;
2034         int len = 0, rc;
2035
2036         if (!inode || !(sbi = ll_i2sbi(inode)))
2037                 GOTO(out_statfs, rc = -EINVAL);
2038
2039         rc = obd_ioctl_getdata(&buf, &len, arg);
2040         if (rc)
2041                 GOTO(out_statfs, rc);
2042
2043         data = (void*)buf;
2044         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2045             !data->ioc_pbuf1 || !data->ioc_pbuf2)
2046                 GOTO(out_statfs, rc = -EINVAL);
2047
2048         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2049         if (type == LL_STATFS_MDC)
2050                 exp = sbi->ll_md_exp;
2051         else if (type == LL_STATFS_LOV)
2052                 exp = sbi->ll_dt_exp;
2053         else
2054                 GOTO(out_statfs, rc = -ENODEV);
2055
2056         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2057         if (rc)
2058                 GOTO(out_statfs, rc);
2059 out_statfs:
2060         if (buf)
2061                 obd_ioctl_freedata(buf, len);
2062         return rc;
2063 }
2064
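/* Apply an "llite" configuration-log parameter to a mounted client.  The
 * target superblock is recovered from the hex suffix of the instance name
 * ("lustre-client-<sb>") carried in lcfg buffer 0. */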
2065 int ll_process_config(struct lustre_cfg *lcfg)
2066 {
2067         char *ptr;
2068         void *sb;
2069         struct lprocfs_static_vars lvars;
2070         unsigned long x;
2071         int rc = 0;
2072
2073         lprocfs_llite_init_vars(&lvars);
2074
2075         /* The instance name contains the sb: lustre-client-aacfe000 */
2076         ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2077         if (!ptr || !*(++ptr))
2078                 return -EINVAL;
2079         if (sscanf(ptr, "%lx", &x) != 1)
2080                 return -EINVAL;
2081         sb = (void *)x;
2082         /* This better be a real Lustre superblock! */
2083         LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2084
2085         /* Note we have not called client_common_fill_super yet, so
2086            proc fns must be able to handle that! */
2087         rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2088                                       lcfg, sb);
2089         if (rc > 0)
2090                 rc = 0;
2091         return(rc);
2092 }
2093
2094 /* This function prepares the md_op_data hint for passing it down to the MD stack. */
2095 struct md_op_data * ll_prep_md_op_data(struct md_op_data *op_data,
2096                                        struct inode *i1, struct inode *i2,
2097                                        const char *name, int namelen,
2098                                        int mode, __u32 opc, void *data)
2099 {
2100         LASSERT(i1 != NULL);
2101
2102         if (namelen > ll_i2sbi(i1)->ll_namelen)
2103                 return ERR_PTR(-ENAMETOOLONG);
2104
2105         if (op_data == NULL)
2106                 OBD_ALLOC_PTR(op_data);
2107
2108         if (op_data == NULL)
2109                 return ERR_PTR(-ENOMEM);
2110
2111         ll_i2gids(op_data->op_suppgids, i1, i2);
2112         op_data->op_fid1 = *ll_inode2fid(i1);
2113         op_data->op_capa1 = ll_mdscapa_get(i1);
2114
2115         if (i2) {
2116                 op_data->op_fid2 = *ll_inode2fid(i2);
2117                 op_data->op_capa2 = ll_mdscapa_get(i2);
2118         } else {
2119                 fid_zero(&op_data->op_fid2);
2120                 op_data->op_capa2 = NULL;
2121         }
2122
2123         op_data->op_name = name;
2124         op_data->op_namelen = namelen;
2125         op_data->op_mode = mode;
2126         op_data->op_mod_time = cfs_time_current_sec();
2127         op_data->op_fsuid = cfs_curproc_fsuid();
2128         op_data->op_fsgid = cfs_curproc_fsgid();
2129         op_data->op_cap = cfs_curproc_cap_pack();
2130         op_data->op_bias = MDS_CHECK_SPLIT;
2131         op_data->op_opc = opc;
2132         op_data->op_mds = 0;
2133         op_data->op_data = data;
2134
2135         return op_data;
2136 }
2137
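/* Release the capability references taken in ll_prep_md_op_data() and free
 * the op_data structure. */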
2138 void ll_finish_md_op_data(struct md_op_data *op_data)
2139 {
2140         capa_put(op_data->op_capa1);
2141         capa_put(op_data->op_capa2);
2142         OBD_FREE_PTR(op_data);
2143 }
2144
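/* .show_options superblock method: print the Lustre-specific mount options
 * (nolock, flock/localflock, user_xattr, acl, lazystatfs) for /proc/mounts. */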
2145 int ll_show_options(struct seq_file *seq, struct vfsmount *vfs)
2146 {
2147         struct ll_sb_info *sbi;
2148
2149         LASSERT((seq != NULL) && (vfs != NULL));
2150         sbi = ll_s2sbi(vfs->mnt_sb);
2151
2152         if (sbi->ll_flags & LL_SBI_NOLCK)
2153                 seq_puts(seq, ",nolock");
2154
2155         if (sbi->ll_flags & LL_SBI_FLOCK)
2156                 seq_puts(seq, ",flock");
2157
2158         if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2159                 seq_puts(seq, ",localflock");
2160
2161         if (sbi->ll_flags & LL_SBI_USER_XATTR)
2162                 seq_puts(seq, ",user_xattr");
2163
2164         if (sbi->ll_flags & LL_SBI_ACL)
2165                 seq_puts(seq, ",acl");
2166
2167         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2168                 seq_puts(seq, ",lazystatfs");
2169
2170         RETURN(0);
2171 }