lustre/llite/llite_lib.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Light Super operations
5  *
6  *  Copyright (c) 2002-2005 Cluster File Systems, Inc.
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #define DEBUG_SUBSYSTEM S_LLITE
25
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/random.h>
29 #include <linux/version.h>
30
31 #include <lustre_lite.h>
32 #include <lustre_ha.h>
33 #include <lustre_dlm.h>
34 #include <lprocfs_status.h>
35 #include <lustre_disk.h>
36 #include <lustre_param.h>
37 #include <lustre_log.h>
38 #include <obd_cksum.h>
39 #include "llite_internal.h"
40
41 cfs_mem_cache_t *ll_file_data_slab;
42
43 LIST_HEAD(ll_super_blocks);
44 spinlock_t ll_sb_lock = SPIN_LOCK_UNLOCKED;
45
46 extern struct address_space_operations ll_aops;
47 extern struct address_space_operations ll_dir_aops;
48
49 #ifndef log2
50 #define log2(n) ffz(~(n))
51 #endif
52
53
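/* Allocate and initialize the per-mount ll_sb_info: set up its locks and
 * lists, size the async page and read-ahead limits from the amount of
 * physical memory, generate the client UUID, and add the new sbi to the
 * global ll_super_blocks list.  Returns NULL on allocation failure. */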
54 static struct ll_sb_info *ll_init_sbi(void)
55 {
56         struct ll_sb_info *sbi = NULL;
57         class_uuid_t uuid;
58         int i;
59         ENTRY;
60
61         OBD_ALLOC(sbi, sizeof(*sbi));
62         if (!sbi)
63                 RETURN(NULL);
64
65         spin_lock_init(&sbi->ll_lock);
66         spin_lock_init(&sbi->ll_lco.lco_lock);
67         spin_lock_init(&sbi->ll_pp_extent_lock);
68         spin_lock_init(&sbi->ll_process_lock);
69         sbi->ll_rw_stats_on = 0;
70         INIT_LIST_HEAD(&sbi->ll_pglist);
71         if (num_physpages >> (20 - CFS_PAGE_SHIFT) < 512)
72                 sbi->ll_async_page_max = num_physpages / 2;
73         else
74                 sbi->ll_async_page_max = (num_physpages / 4) * 3;
75         sbi->ll_ra_info.ra_max_pages = min(num_physpages / 8,
76                                            SBI_DEFAULT_READAHEAD_MAX);
77         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
78                                            SBI_DEFAULT_READAHEAD_WHOLE_MAX;
79         sbi->ll_contention_time = SBI_DEFAULT_CONTENTION_SECONDS;
80         sbi->ll_lockless_truncate_enable = SBI_DEFAULT_LOCKLESS_TRUNCATE_ENABLE;
81         INIT_LIST_HEAD(&sbi->ll_conn_chain);
82         INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
83
84         ll_generate_random_uuid(uuid);
85         class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
86         CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
87
88         spin_lock(&ll_sb_lock);
89         list_add_tail(&sbi->ll_list, &ll_super_blocks);
90         spin_unlock(&ll_sb_lock);
91
92 #ifdef ENABLE_LLITE_CHECKSUM
93         sbi->ll_flags |= LL_SBI_CHECKSUM;
94 #endif
95
96 #ifdef HAVE_LRU_RESIZE_SUPPORT
97         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
98 #endif
99
100 #ifdef HAVE_EXPORT___IGET
101         INIT_LIST_HEAD(&sbi->ll_deathrow);
102         spin_lock_init(&sbi->ll_deathrow_lock);
103 #endif
104         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
105                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_r_hist.oh_lock);
106                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_w_hist.oh_lock);
107         }
108
109         RETURN(sbi);
110 }
111
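/* Remove the sbi from the global ll_super_blocks list and free it. */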
112 void ll_free_sbi(struct super_block *sb)
113 {
114         struct ll_sb_info *sbi = ll_s2sbi(sb);
115         ENTRY;
116
117         if (sbi != NULL) {
118                 spin_lock(&ll_sb_lock);
119                 list_del(&sbi->ll_list);
120                 spin_unlock(&ll_sb_lock);
121                 OBD_FREE(sbi, sizeof(*sbi));
122         }
123         EXIT;
124 }
125
126 static struct dentry_operations ll_d_root_ops = {
127 #ifdef DCACHE_LUSTRE_INVALID
128         .d_compare = ll_dcompare,
129 #endif
130 };
131
132 /* Initialize the default and maximum LOV EA and cookie sizes.  This allows
133  * us to make MDS RPCs with large enough reply buffers to hold the
134  * maximum-sized (= maximum striped) EA and cookie without having to
135  * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */
136 static int ll_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
137 {
138         struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC };
139         __u32 valsize = sizeof(struct lov_desc);
140         int rc, easize, def_easize, cookiesize;
141         struct lov_desc desc;
142         __u32 stripes;
143         ENTRY;
144
145         rc = obd_get_info(dt_exp, strlen(KEY_LOVDESC) + 1, KEY_LOVDESC,
146                           &valsize, &desc);
147         if (rc)
148                 RETURN(rc);
149
150         stripes = min(desc.ld_tgt_count, (__u32)LOV_MAX_STRIPE_COUNT);
151         lsm.lsm_stripe_count = stripes;
152         easize = obd_size_diskmd(dt_exp, &lsm);
153
154         lsm.lsm_stripe_count = desc.ld_default_stripe_count;
155         def_easize = obd_size_diskmd(dt_exp, &lsm);
156
157         cookiesize = stripes * sizeof(struct llog_cookie);
158
159         CDEBUG(D_HA, "updating max_mdsize/max_cookiesize: %d/%d\n",
160                easize, cookiesize);
161
162         rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize);
163         RETURN(rc);
164 }
165
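/* Finish mounting the client: connect to the metadata (md) and data (dt)
 * obd devices, negotiate connect flags with each, fetch the root fid from
 * the MDS, instantiate the root inode/dentry and install the Lustre super
 * and export operations.  Any partially established state is torn down on
 * failure via the out_* labels below. */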
166 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
167 {
168         struct inode *root = 0;
169         struct ll_sb_info *sbi = ll_s2sbi(sb);
170         struct obd_device *obd;
171         struct lu_fid rootfid;
172         struct obd_capa *oc = NULL;
173         struct obd_statfs osfs;
174         struct ptlrpc_request *request = NULL;
175         struct lustre_handle dt_conn = {0, };
176         struct lustre_handle md_conn = {0, };
177         struct obd_connect_data *data = NULL;
178         struct lustre_md lmd;
179         obd_valid valid;
180         int size, err, checksum;
181         ENTRY;
182
183         obd = class_name2obd(md);
184         if (!obd) {
185                 CERROR("MD %s: not setup or attached\n", md);
186                 RETURN(-EINVAL);
187         }
188
189         OBD_ALLOC_PTR(data);
190         if (data == NULL)
191                 RETURN(-ENOMEM);
192
193         if (proc_lustre_fs_root) {
194                 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
195                                                   dt, md);
196                 if (err < 0)
 197                         CERROR("could not register mount in /proc/lustre\n");
198         }
199
200         /* indicate the features supported by this client */
201         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
202                                   OBD_CONNECT_JOIN     | OBD_CONNECT_ATTRFID  |
203                                   OBD_CONNECT_VERSION  | OBD_CONNECT_MDS_CAPA |
204                                   OBD_CONNECT_OSS_CAPA | OBD_CONNECT_CANCELSET|
205                                   OBD_CONNECT_FID;
206
207 #ifdef HAVE_LRU_RESIZE_SUPPORT
208         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
209                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
210 #endif
211 #ifdef CONFIG_FS_POSIX_ACL
212         data->ocd_connect_flags |= OBD_CONNECT_ACL;
213 #endif
214         data->ocd_ibits_known = MDS_INODELOCK_FULL;
215         data->ocd_version = LUSTRE_VERSION_CODE;
216
217         if (sb->s_flags & MS_RDONLY)
218                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
219         if (sbi->ll_flags & LL_SBI_USER_XATTR)
220                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
221
222 #ifdef HAVE_MS_FLOCK_LOCK
223         /* force vfs to use lustre handler for flock() calls - bug 10743 */
224         sb->s_flags |= MS_FLOCK_LOCK;
225 #endif
226         
227         if (sbi->ll_flags & LL_SBI_FLOCK)
228                 sbi->ll_fop = &ll_file_operations_flock;
229         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
230                 sbi->ll_fop = &ll_file_operations;
231         else
232                 sbi->ll_fop = &ll_file_operations_noflock;
233
234         /* real client */
235         data->ocd_connect_flags |= OBD_CONNECT_REAL;
236         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
237                 data->ocd_connect_flags &= ~OBD_CONNECT_LCL_CLIENT;
238                 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT;
239         } else {
240                 data->ocd_connect_flags &= ~OBD_CONNECT_RMT_CLIENT;
241                 data->ocd_connect_flags |= OBD_CONNECT_LCL_CLIENT;
242         }
243
244         err = obd_connect(NULL, &md_conn, obd, &sbi->ll_sb_uuid, data, NULL);
245         if (err == -EBUSY) {
246                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
247                                    "recovery, of which this client is not a "
248                                    "part. Please wait for recovery to complete,"
249                                    " abort, or time out.\n", md);
250                 GOTO(out, err);
251         } else if (err) {
252                 CERROR("cannot connect to %s: rc = %d\n", md, err);
253                 GOTO(out, err);
254         }
255         sbi->ll_md_exp = class_conn2export(&md_conn);
256
257         err = obd_fid_init(sbi->ll_md_exp);
258         if (err) {
259                 CERROR("Can't init metadata layer FID infrastructure, "
260                        "rc %d\n", err);
261                 GOTO(out_md, err);
262         }
263
264         err = obd_statfs(obd, &osfs, cfs_time_current_64() - HZ, 0);
265         if (err)
266                 GOTO(out_md_fid, err);
267
268         size = sizeof(*data);
269         err = obd_get_info(sbi->ll_md_exp, strlen(KEY_CONN_DATA),
270                            KEY_CONN_DATA,  &size, data);
271         if (err) {
 272                 CERROR("Get connect data failed: %d\n", err);
273                 GOTO(out_md, err);
274         }
275
276         LASSERT(osfs.os_bsize);
277         sb->s_blocksize = osfs.os_bsize;
278         sb->s_blocksize_bits = log2(osfs.os_bsize);
279         sb->s_magic = LL_SUPER_MAGIC;
280
281         /* for bug 11559. in $LINUX/fs/read_write.c, function do_sendfile():
282          *         retval = in_file->f_op->sendfile(...);
283          *         if (*ppos > max)
284          *                 retval = -EOVERFLOW;
285          *
 286          * it will check if *ppos is greater than max. However, max equals
 287          * s_maxbytes, which is a negative integer on an x86_64 box since loff_t
 288          * has been defined as a signed long long integer in the Linux kernel. */
289 #if BITS_PER_LONG == 64
290         sb->s_maxbytes = PAGE_CACHE_MAXBYTES >> 1;
291 #else
292         sb->s_maxbytes = PAGE_CACHE_MAXBYTES;
293 #endif
294         sbi->ll_namelen = osfs.os_namelen;
295         sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
296
297         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
298             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
299                 LCONSOLE_INFO("Disabling user_xattr feature because "
300                               "it is not supported on the server\n");
301                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
302         }
303
304         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
305 #ifdef MS_POSIXACL
306                 sb->s_flags |= MS_POSIXACL;
307 #endif
308                 sbi->ll_flags |= LL_SBI_ACL;
309         } else {
 310                 LCONSOLE_INFO("client wants to enable ACL, but the MDT does not support it\n");
311 #ifdef MS_POSIXACL
312                 sb->s_flags &= ~MS_POSIXACL;
313 #endif
314                 sbi->ll_flags &= ~LL_SBI_ACL;
315         }
316
317         if (data->ocd_connect_flags & OBD_CONNECT_JOIN)
318                 sbi->ll_flags |= LL_SBI_JOIN;
319
320         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
321                 if (!(data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT)) {
 322                         /* sometimes a local client claims to be remote, but the
 323                          * MDT will disagree when client GSS is not applied. */
 324                         LCONSOLE_INFO("client claims to be remote, but the server "
 325                                       "rejected it; forcing local.\n");
326                         sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
327                 }
328         } else {
329                 if (!(data->ocd_connect_flags & OBD_CONNECT_LCL_CLIENT)) {
 330                         /* with GSS applied, a remote client cannot claim to be
 331                          * local, so the MDT may force the client to be remote. */
 332                         LCONSOLE_INFO("client claims to be local, but the server "
 333                                       "rejected it; forcing remote.\n");
334                         sbi->ll_flags |= LL_SBI_RMT_CLIENT;
335                 }
336         }
337
338         if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
339                 LCONSOLE_INFO("client enabled MDS capability!\n");
340                 sbi->ll_flags |= LL_SBI_MDS_CAPA;
341         }
342
343         if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
344                 LCONSOLE_INFO("client enabled OSS capability!\n");
345                 sbi->ll_flags |= LL_SBI_OSS_CAPA;
346         }
347
348         sbi->ll_sdev_orig = sb->s_dev;
349 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
350         /* We set sb->s_dev equal on all lustre clients in order to support
351          * NFS export clustering.  NFSD requires that the FSID be the same
352          * on all clients. */
353         /* s_dev is also used in lt_compare() to compare two fs, but that is
354          * only a node-local comparison. */
355
356         /* XXX: this will not work with LMV */
357         sb->s_dev = get_uuid2int(sbi2mdc(sbi)->cl_target_uuid.uuid,
358                                  strlen(sbi2mdc(sbi)->cl_target_uuid.uuid));
359 #endif
360
361         obd = class_name2obd(dt);
362         if (!obd) {
363                 CERROR("DT %s: not setup or attached\n", dt);
364                 GOTO(out_md_fid, err = -ENODEV);
365         }
366
367         data->ocd_connect_flags = OBD_CONNECT_GRANT     | OBD_CONNECT_VERSION  |
368                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
369                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID      |
370                                   OBD_CONNECT_SRVLOCK   | OBD_CONNECT_TRUNCLOCK;
371         if (sbi->ll_flags & LL_SBI_OSS_CAPA)
372                 data->ocd_connect_flags |= OBD_CONNECT_OSS_CAPA;
373
374         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
375                 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
376                  * disabled by default, because it can still be enabled on the
377                  * fly via /proc. As a consequence, we still need to come to an
378                  * agreement on the supported algorithms at connect time */
379                 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
380
381                 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
382                         data->ocd_cksum_types = OBD_CKSUM_ADLER;
383                 else
384                         /* send the list of supported checksum types */
385                         data->ocd_cksum_types = OBD_CKSUM_ALL;
386         }
387
388 #ifdef HAVE_LRU_RESIZE_SUPPORT
389         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
390 #endif
391         CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d "
392                "ocd_grant: %d\n", data->ocd_connect_flags,
393                data->ocd_version, data->ocd_grant);
394
395         obd->obd_upcall.onu_owner = &sbi->ll_lco;
396         obd->obd_upcall.onu_upcall = ll_ocd_update;
397         data->ocd_brw_size = PTLRPC_MAX_BRW_PAGES << CFS_PAGE_SHIFT;
398
399         err = obd_connect(NULL, &dt_conn, obd, &sbi->ll_sb_uuid, data, NULL);
400         if (err == -EBUSY) {
401                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
402                                    "recovery, of which this client is not a "
403                                    "part.  Please wait for recovery to "
404                                    "complete, abort, or time out.\n", dt);
405                 GOTO(out_md_fid, err);
406         } else if (err) {
407                 CERROR("Cannot connect to %s: rc = %d\n", dt, err);
408                 GOTO(out_md_fid, err);
409         }
410
411         sbi->ll_dt_exp = class_conn2export(&dt_conn);
412
413         err = obd_fid_init(sbi->ll_dt_exp);
414         if (err) {
415                 CERROR("Can't init data layer FID infrastructure, "
416                        "rc %d\n", err);
417                 GOTO(out_dt, err);
418         }
419         
420         spin_lock(&sbi->ll_lco.lco_lock);
421         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
422         spin_unlock(&sbi->ll_lco.lco_lock);
423
424         ll_init_ea_size(sbi->ll_md_exp, sbi->ll_dt_exp);
425
426         err = obd_prep_async_page(sbi->ll_dt_exp, NULL, NULL, NULL,
427                                   0, NULL, NULL, NULL);
428         if (err < 0) {
 429                 LCONSOLE_ERROR_MSG(0x151, "There are no OSTs in this "
430                                    "filesystem. There must be at least one "
431                                    "active OST for a client to start.\n");
432                 GOTO(out_dt_fid, err);
433         }
434
435         if (!ll_async_page_slab) {
436                 ll_async_page_slab_size =
437                         size_round(sizeof(struct ll_async_page)) + err;
438                 ll_async_page_slab = cfs_mem_cache_create("ll_async_page",
439                                                           ll_async_page_slab_size,
440                                                           0, 0);
441                 if (!ll_async_page_slab)
442                         GOTO(out_dt_fid, err = -ENOMEM);
443         }
444
445         err = md_getstatus(sbi->ll_md_exp, &rootfid, &oc);
446         if (err) {
 447                 CERROR("md_getstatus failed: rc = %d\n", err);
448                 GOTO(out_dt_fid, err);
449         }
450         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&rootfid));
451         sbi->ll_root_fid = rootfid;
452
453         sb->s_op = &lustre_super_operations;
454         sb->s_export_op = &lustre_export_operations;
455
456         /* make root inode
457          * XXX: move this to after cbd setup? */
458         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
459         if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
460                 valid |= OBD_MD_FLRMTPERM;
461         else if (sbi->ll_flags & LL_SBI_ACL)
462                 valid |= OBD_MD_FLACL;
463
464         err = md_getattr(sbi->ll_md_exp, &rootfid, oc, valid, 0, &request);
465         if (oc)
466                 free_capa(oc);
467         if (err) {
468                 CERROR("md_getattr failed for root: rc = %d\n", err);
469                 GOTO(out_dt_fid, err);
470         }
471         memset(&lmd, 0, sizeof(lmd));
472         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
473                                sbi->ll_md_exp, &lmd);
474         if (err) {
475                 CERROR("failed to understand root inode md: rc = %d\n", err);
476                 ptlrpc_req_finished (request);
477                 GOTO(out_dt_fid, err);
478         }
479
480         LASSERT(fid_is_sane(&sbi->ll_root_fid));
481         root = ll_iget(sb, ll_fid_build_ino(sbi, &sbi->ll_root_fid), &lmd);
482         md_free_lustre_md(sbi->ll_md_exp, &lmd);
483         ptlrpc_req_finished(request);
484
485         if (root == NULL || is_bad_inode(root)) {
486                 if (lmd.lsm)
487                         obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
488 #ifdef CONFIG_FS_POSIX_ACL
489                 if (lmd.posix_acl) {
490                         posix_acl_release(lmd.posix_acl);
491                         lmd.posix_acl = NULL;
492                 }
493 #endif
494                 CERROR("lustre_lite: bad iget4 for root\n");
495                 GOTO(out_root, err = -EBADF);
496         }
497
498         err = ll_close_thread_start(&sbi->ll_lcq);
499         if (err) {
500                 CERROR("cannot start close thread: rc %d\n", err);
501                 GOTO(out_root, err);
502         }
503
504 #ifdef CONFIG_FS_POSIX_ACL
505         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
506                 rct_init(&sbi->ll_rct);
507                 et_init(&sbi->ll_et);
508         }
509 #endif
510
511         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
512         err = obd_set_info_async(sbi->ll_dt_exp, strlen("checksum"),"checksum",
513                                  sizeof(checksum), &checksum, NULL);
514
515         sb->s_root = d_alloc_root(root);
516         if (data != NULL)
517                 OBD_FREE(data, sizeof(*data));
518         sb->s_root->d_op = &ll_d_root_ops;
519         RETURN(err);
520 out_root:
521         if (root)
522                 iput(root);
523 out_dt_fid:
524         obd_fid_fini(sbi->ll_dt_exp);
525 out_dt:
526         obd_disconnect(sbi->ll_dt_exp);
527         sbi->ll_dt_exp = NULL;
528 out_md_fid:
529         obd_fid_fini(sbi->ll_md_exp);
530 out_md:
531         obd_disconnect(sbi->ll_md_exp);
532         sbi->ll_md_exp = NULL;
533 out:
534         if (data != NULL)
535                 OBD_FREE_PTR(data);
536         lprocfs_unregister_mountpoint(sbi);
537         return err;
538 }
539
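/* Query the maximum LOV EA size from the MDC (seeded with the local LOV
 * estimate) so callers can size their getattr reply buffers. */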
540 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
541 {
542         int size, rc;
543
544         *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
545         size = sizeof(int);
546         rc = obd_get_info(sbi->ll_md_exp, strlen("max_easize"), "max_easize",
547                           &size, lmmsize);
548         if (rc)
 549                 CERROR("Get max mdsize error: rc %d\n", rc);
550
551         RETURN(rc);
552 }
553
554 void ll_dump_inode(struct inode *inode)
555 {
556         struct list_head *tmp;
557         int dentry_count = 0;
558
559         LASSERT(inode != NULL);
560
561         list_for_each(tmp, &inode->i_dentry)
562                 dentry_count++;
563
564         CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
565                inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
566                inode->i_mode, atomic_read(&inode->i_count), dentry_count);
567 }
568
569 void lustre_dump_dentry(struct dentry *dentry, int recur)
570 {
571         struct list_head *tmp;
572         int subdirs = 0;
573
574         LASSERT(dentry != NULL);
575
576         list_for_each(tmp, &dentry->d_subdirs)
577                 subdirs++;
578
579         CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
580                " flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
581                dentry->d_name.len, dentry->d_name.name,
582                dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
583                dentry->d_parent, dentry->d_inode, atomic_read(&dentry->d_count),
584                dentry->d_flags, dentry->d_fsdata, subdirs);
585         if (dentry->d_inode != NULL)
586                 ll_dump_inode(dentry->d_inode);
587
588         if (recur == 0)
589                 return;
590
591         list_for_each(tmp, &dentry->d_subdirs) {
592                 struct dentry *d = list_entry(tmp, struct dentry, d_child);
593                 lustre_dump_dentry(d, recur - 1);
594         }
595 }
596
597 #ifdef HAVE_EXPORT___IGET
598 static void prune_dir_dentries(struct inode *inode)
599 {
600         struct dentry *dentry, *prev = NULL;
601
 602         /* due to lustre-specific logic, a directory can have
 603          * a few dentries - a bug from the VFS point of view */
604 restart:
605         spin_lock(&dcache_lock);
606         if (!list_empty(&inode->i_dentry)) {
607                 dentry = list_entry(inode->i_dentry.prev,
608                                     struct dentry, d_alias);
 609                 /* in order to prevent infinite loops we
 610                  * break if the previous dentry is busy */
611                 if (dentry != prev) {
612                         prev = dentry;
613                         dget_locked(dentry);
614                         spin_unlock(&dcache_lock);
615
616                         /* try to kill all child dentries */
617                         lock_dentry(dentry);
618                         shrink_dcache_parent(dentry);
619                         unlock_dentry(dentry);
620                         dput(dentry);
621
622                         /* now try to get rid of current dentry */
623                         d_prune_aliases(inode);
624                         goto restart;
625                 }
626         }
627         spin_unlock(&dcache_lock);
628 }
629
630 static void prune_deathrow_one(struct ll_inode_info *lli)
631 {
632         struct inode *inode = ll_info2i(lli);
633
634         /* first, try to drop any dentries - they hold a ref on the inode */
635         if (S_ISDIR(inode->i_mode))
636                 prune_dir_dentries(inode);
637         else
638                 d_prune_aliases(inode);
639
640
641         /* if somebody still uses it, leave it */
642         LASSERT(atomic_read(&inode->i_count) > 0);
643         if (atomic_read(&inode->i_count) > 1)
644                 goto out;
645
 646         CDEBUG(D_INODE, "inode %lu/%u(%d) looks like a good candidate for pruning\n",
 647                inode->i_ino, inode->i_generation, atomic_read(&inode->i_count));
648
649         /* seems nobody uses it anymore */
650         inode->i_nlink = 0;
651
652 out:
653         iput(inode);
654         return;
655 }
656
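/* Drain the per-superblock deathrow list of inodes.  With try != 0 the lock
 * is only trylock'ed and the loop yields when rescheduling is needed, so it
 * is safe to call from reclaim paths; with try == 0 the list is emptied
 * unconditionally. */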
657 static void prune_deathrow(struct ll_sb_info *sbi, int try)
658 {
659         struct ll_inode_info *lli;
660         int empty;
661
662         do {
663                 if (need_resched() && try)
664                         break;
665
666                 if (try) {
667                         if (!spin_trylock(&sbi->ll_deathrow_lock))
668                                 break;
669                 } else {
670                         spin_lock(&sbi->ll_deathrow_lock);
671                 }
672
673                 empty = 1;
674                 lli = NULL;
675                 if (!list_empty(&sbi->ll_deathrow)) {
676                         lli = list_entry(sbi->ll_deathrow.next,
677                                          struct ll_inode_info,
678                                          lli_dead_list);
679                         list_del_init(&lli->lli_dead_list);
680                         if (!list_empty(&sbi->ll_deathrow))
681                                 empty = 0;
682                 }
683                 spin_unlock(&sbi->ll_deathrow_lock);
684
685                 if (lli)
686                         prune_deathrow_one(lli);
687
688         } while (empty == 0);
689 }
690 #else /* !HAVE_EXPORT___IGET */
691 #define prune_deathrow(sbi, try) do {} while (0)
692 #endif /* HAVE_EXPORT___IGET */
693
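/* Undo client_common_fill_super: cancel unused locks, stop the close thread,
 * destroy deathrow inodes, disconnect the data and metadata exports and
 * remove this mount's /proc entries. */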
694 void client_common_put_super(struct super_block *sb)
695 {
696         struct ll_sb_info *sbi = ll_s2sbi(sb);
697         ENTRY;
698
699 #ifdef CONFIG_FS_POSIX_ACL
700         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
701                 et_fini(&sbi->ll_et);
702                 rct_fini(&sbi->ll_rct);
703         }
704 #endif
705
706         obd_cancel_unused(sbi->ll_dt_exp, NULL, 0, NULL);
707
708         ll_close_thread_shutdown(sbi->ll_lcq);
709
710         /* destroy inodes in deathrow */
711         prune_deathrow(sbi, 0);
712
713         list_del(&sbi->ll_conn_chain);
714
715         obd_fid_fini(sbi->ll_dt_exp);
716         obd_disconnect(sbi->ll_dt_exp);
717         sbi->ll_dt_exp = NULL;
718
719         lprocfs_unregister_mountpoint(sbi);
720
721         obd_fid_fini(sbi->ll_md_exp);
722         obd_disconnect(sbi->ll_md_exp);
723         sbi->ll_md_exp = NULL;
724
725         EXIT;
726 }
727
728 void ll_kill_super(struct super_block *sb)
729 {
730         struct ll_sb_info *sbi;
731
732         ENTRY;
733
 734         /* sb not initialized? */
735         if (!(sb->s_flags & MS_ACTIVE))
736                 return;
737
738         sbi = ll_s2sbi(sb);
 739         /* we need to restore s_dev, which was changed for clustered NFS, before
 740          * put_super, because newer kernels cache s_dev and changing sb->s_dev in
 741          * put_super does not affect the real device removal */
742         if (sbi)
743                 sb->s_dev = sbi->ll_sdev_orig;
744         EXIT;
745 }
746
747 char *ll_read_opt(const char *opt, char *data)
748 {
749         char *value;
750         char *retval;
751         ENTRY;
752
753         CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
754         if (strncmp(opt, data, strlen(opt)))
755                 RETURN(NULL);
756         if ((value = strchr(data, '=')) == NULL)
757                 RETURN(NULL);
758
759         value++;
760         OBD_ALLOC(retval, strlen(value) + 1);
761         if (!retval) {
762                 CERROR("out of memory!\n");
763                 RETURN(NULL);
764         }
765
766         memcpy(retval, value, strlen(value)+1);
767         CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
768         RETURN(retval);
769 }
770
771 static inline int ll_set_opt(const char *opt, char *data, int fl)
772 {
773         if (strncmp(opt, data, strlen(opt)) != 0)
774                 return(0);
775         else
776                 return(fl);
777 }
778
779 /* non-client-specific mount options are parsed in lmd_parse */
780 static int ll_options(char *options, int *flags)
781 {
782         int tmp;
783         char *s1 = options, *s2;
784         ENTRY;
785
786         if (!options) 
787                 RETURN(0);
788
789         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
790
791         while (*s1) {
792                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
793                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
794                 if (tmp) {
795                         *flags |= tmp;
796                         goto next;
797                 }
798                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
799                 if (tmp) {
800                         *flags |= tmp;
801                         goto next;
802                 }
803                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
804                 if (tmp) {
805                         *flags |= tmp;
806                         goto next;
807                 }
808                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
809                 if (tmp) {
810                         *flags &= ~tmp;
811                         goto next;
812                 }
813                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
814                 if (tmp) {
815                         *flags |= tmp;
816                         goto next;
817                 }
818                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
819                 if (tmp) {
820                         *flags &= ~tmp;
821                         goto next;
822                 }
823                 tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
824                 if (tmp) {
 825                         /* Ignore deprecated mount option.  The client will
 826                          * always try to mount with ACL support; whether it
 827                          * is used depends on whether the server supports it. */
828                         goto next;
829                 }
830                 tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
831                 if (tmp) {
832                         goto next;
833                 }
834                 tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
835                 if (tmp) {
836                         *flags |= tmp;
837                         goto next;
838                 }
839
840                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
841                 if (tmp) {
842                         *flags |= tmp;
843                         goto next;
844                 }
845                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
846                 if (tmp) {
847                         *flags &= ~tmp;
848                         goto next;
849                 }
850                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
851                 if (tmp) {
852                         *flags |= tmp;
853                         goto next;
854                 }
855                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
856                 if (tmp) {
857                         *flags &= ~tmp;
858                         goto next;
859                 }
860
861                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
862                                    s1);
863                 RETURN(-EINVAL);
864
865 next:
866                 /* Find next opt */
867                 s2 = strchr(s1, ',');
868                 if (s2 == NULL)
869                         break;
870                 s1 = s2 + 1;
871         }
872         RETURN(0);
873 }
874
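/* Initialize a freshly allocated ll_inode_info: magic, semaphores, spinlock,
 * lists and open-handle bookkeeping all start out empty. */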
875 void ll_lli_init(struct ll_inode_info *lli)
876 {
877         lli->lli_inode_magic = LLI_INODE_MAGIC;
878         sema_init(&lli->lli_size_sem, 1);
879         sema_init(&lli->lli_write_sem, 1);
880         lli->lli_flags = 0;
881         lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
882         spin_lock_init(&lli->lli_lock);
883         INIT_LIST_HEAD(&lli->lli_pending_write_llaps);
884         INIT_LIST_HEAD(&lli->lli_close_list);
885         lli->lli_inode_magic = LLI_INODE_MAGIC;
886         sema_init(&lli->lli_och_sem, 1);
887         lli->lli_mds_read_och = lli->lli_mds_write_och = NULL;
888         lli->lli_mds_exec_och = NULL;
889         lli->lli_open_fd_read_count = lli->lli_open_fd_write_count = 0;
890         lli->lli_open_fd_exec_count = 0;
891         INIT_LIST_HEAD(&lli->lli_dead_list);
892         lli->lli_remote_perms = NULL;
893         lli->lli_rmtperm_utime = 0;
894         sema_init(&lli->lli_rmtperm_sem, 1);
895         INIT_LIST_HEAD(&lli->lli_oss_capas);
896 }
897
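/* Mount entry point for the llite client: allocate the sbi, parse the
 * client-specific mount options, process the MGS configuration log for this
 * profile, then build the instance-qualified MD/DT device names and hand
 * them to client_common_fill_super(). */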
898 int ll_fill_super(struct super_block *sb)
899 {
900         struct lustre_profile *lprof;
901         struct lustre_sb_info *lsi = s2lsi(sb);
902         struct ll_sb_info *sbi;
903         char  *dt = NULL, *md = NULL;
904         char  *profilenm = get_profile_name(sb);
905         struct config_llog_instance cfg = {0, };
906         char   ll_instance[sizeof(sb) * 2 + 1];
907         int    err;
908         ENTRY;
909
910         CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
911
912         cfs_module_get();
913
914         sb->s_type->fs_flags |= FS_RENAME_DOES_D_MOVE;
915         /* client additional sb info */
916         lsi->lsi_llsbi = sbi = ll_init_sbi();
917         if (!sbi) {
918                 cfs_module_put();
919                 RETURN(-ENOMEM);
920         }
921
922         err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
923         if (err) 
924                 GOTO(out_free, err);
925
926         /* Generate a string unique to this super, in case some joker tries
927            to mount the same fs at two mount points.
928            Use the address of the super itself.*/
929         sprintf(ll_instance, "%p", sb);
930         cfg.cfg_instance = ll_instance;
931         cfg.cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
932
933         /* set up client obds */
934         err = lustre_process_log(sb, profilenm, &cfg);
935         if (err < 0) {
936                 CERROR("Unable to process log: %d\n", err);
937                 GOTO(out_free, err);
938         }
939
940         lprof = class_get_profile(profilenm);
941         if (lprof == NULL) {
942                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
943                                    " read from the MGS.  Does that filesystem "
944                                    "exist?\n", profilenm);
945                 GOTO(out_free, err = -EINVAL);
946         }
947         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
948                lprof->lp_md, lprof->lp_dt);
949
950         OBD_ALLOC(dt, strlen(lprof->lp_dt) +
951                   strlen(ll_instance) + 2);
952         if (!dt)
953                 GOTO(out_free, err = -ENOMEM);
954         sprintf(dt, "%s-%s", lprof->lp_dt, ll_instance);
955
956         OBD_ALLOC(md, strlen(lprof->lp_md) +
957                   strlen(ll_instance) + 2);
958         if (!md)
959                 GOTO(out_free, err = -ENOMEM);
960         sprintf(md, "%s-%s", lprof->lp_md, ll_instance);
961
962         /* connections, registrations, sb setup */
963         err = client_common_fill_super(sb, md, dt);
964
965 out_free:
966         if (md)
967                 OBD_FREE(md, strlen(md) + 1);
968         if (dt)
969                 OBD_FREE(dt, strlen(dt) + 1);
970         if (err) 
971                 ll_put_super(sb);
972         else
973                 LCONSOLE_WARN("Client %s has started\n", profilenm);        
974
975         RETURN(err);
976 } /* ll_fill_super */
977
978
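/* Unmount: stop watching the config log, propagate obd_force to every obd
 * device in this mount's group, tear down the common client state and
 * manually clean up all devices created for this superblock. */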
979 void ll_put_super(struct super_block *sb)
980 {
981         struct config_llog_instance cfg;
982         char   ll_instance[sizeof(sb) * 2 + 1];
983         struct obd_device *obd;
984         struct lustre_sb_info *lsi = s2lsi(sb);
985         struct ll_sb_info *sbi = ll_s2sbi(sb);
986         char *profilenm = get_profile_name(sb);
987         int force = 1, next;
988         ENTRY;
989
990         CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
991
992         ll_print_capa_stat(sbi);
993
994         sprintf(ll_instance, "%p", sb);
995         cfg.cfg_instance = ll_instance;
996         lustre_end_log(sb, NULL, &cfg);
997         
998         if (sbi->ll_md_exp) {
999                 obd = class_exp2obd(sbi->ll_md_exp);
1000                 if (obd) 
1001                         force = obd->obd_force;
1002         }
1003         
 1004         /* We need to set force before the lov_disconnect in
 1005            lustre_common_put_super, since lov_disconnect cleans up the OSCs as well. */
1006         if (force) {
1007                 next = 0;
1008                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1009                                                      &next)) != NULL) {
1010                         obd->obd_force = force;
1011                 }
1012         }                       
1013
1014         if (sbi->ll_lcq) {
1015                 /* Only if client_common_fill_super succeeded */
1016                 client_common_put_super(sb);
1017         }
1018         next = 0;
1019         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) !=NULL) {
1020                 class_manual_cleanup(obd);
1021         }
1022
1023         if (profilenm)
1024                 class_del_profile(profilenm);
1025
1026         ll_free_sbi(sb);
1027         lsi->lsi_llsbi = NULL;
1028
1029         lustre_common_put_super(sb);
1030
1031         LCONSOLE_WARN("client %s umount complete\n", ll_instance);
1032         
1033         cfs_module_put();
1034
1035         EXIT;
1036 } /* client_put_super */
1037
1038 #ifdef HAVE_REGISTER_CACHE
1039 #include <linux/cache_def.h>
1040 #ifdef HAVE_CACHE_RETURN_INT
1041 static int
1042 #else
1043 static void
1044 #endif
1045 ll_shrink_cache(int priority, unsigned int gfp_mask)
1046 {
1047         struct ll_sb_info *sbi;
1048         int count = 0;
1049
1050         list_for_each_entry(sbi, &ll_super_blocks, ll_list)
1051                 count += llap_shrink_cache(sbi, priority);
1052
1053 #ifdef HAVE_CACHE_RETURN_INT
1054         return count;
1055 #endif
1056 }
1057
1058 struct cache_definition ll_cache_definition = {
1059         .name = "llap_cache",
1060         .shrink = ll_shrink_cache
1061 };
1062 #endif /* HAVE_REGISTER_CACHE */
1063
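/* Return an igrab()'ed inode referenced from a DLM lock's l_ast_data, or
 * NULL if the lock carries no (or a bogus) inode back-pointer. */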
1064 struct inode *ll_inode_from_lock(struct ldlm_lock *lock)
1065 {
1066         struct inode *inode = NULL;
1067         /* NOTE: we depend on atomic igrab() -bzzz */
1068         lock_res_and_lock(lock);
1069         if (lock->l_ast_data) {
1070                 struct ll_inode_info *lli = ll_i2info(lock->l_ast_data);
1071                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1072                         inode = igrab(lock->l_ast_data);
1073                 } else {
1074                         inode = lock->l_ast_data;
1075                         ldlm_lock_debug(NULL, inode->i_state & I_FREEING ?
1076                                                 D_INFO : D_WARNING,
1077                                         lock, __FILE__, __func__, __LINE__,
1078                                         "l_ast_data %p is bogus: magic %08x",
1079                                         lock->l_ast_data, lli->lli_inode_magic);
1080                         inode = NULL;
1081                 }
1082         }
1083         unlock_res_and_lock(lock);
1084         return inode;
1085 }
1086
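/* DLM iterator callback: clear l_ast_data on every lock that still points
 * at the inode being cleared. */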
1087 static int null_if_equal(struct ldlm_lock *lock, void *data)
1088 {
1089         if (data == lock->l_ast_data) {
1090                 lock->l_ast_data = NULL;
1091
1092                 if (lock->l_req_mode != lock->l_granted_mode)
1093                         LDLM_ERROR(lock,"clearing inode with ungranted lock");
1094         }
1095
1096         return LDLM_ITER_CONTINUE;
1097 }
1098
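/* ->clear_inode() hook: detach the inode from any DLM locks, close cached
 * MDS open handles, and free the stripe MD, symlink name, ACLs, remote
 * permissions and capabilities. */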
1099 void ll_clear_inode(struct inode *inode)
1100 {
1101         struct ll_inode_info *lli = ll_i2info(inode);
1102         struct ll_sb_info *sbi = ll_i2sbi(inode);
1103         ENTRY;
1104
1105         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1106                inode->i_generation, inode);
1107
1108         ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1109         md_change_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
1110                          null_if_equal, inode);
1111
1112         LASSERT(!lli->lli_open_fd_write_count);
1113         LASSERT(!lli->lli_open_fd_read_count);
1114         LASSERT(!lli->lli_open_fd_exec_count);
1115
1116         if (lli->lli_mds_write_och)
1117                 ll_md_real_close(inode, FMODE_WRITE);
1118         if (lli->lli_mds_exec_och)
1119                 ll_md_real_close(inode, FMODE_EXEC);
1120         if (lli->lli_mds_read_och)
1121                 ll_md_real_close(inode, FMODE_READ);
1122
1123         if (lli->lli_smd) {
1124                 obd_change_cbdata(sbi->ll_dt_exp, lli->lli_smd,
1125                                   null_if_equal, inode);
1126
1127                 obd_free_memmd(sbi->ll_dt_exp, &lli->lli_smd);
1128                 lli->lli_smd = NULL;
1129         }
1130
1131         if (lli->lli_symlink_name) {
1132                 OBD_FREE(lli->lli_symlink_name,
1133                          strlen(lli->lli_symlink_name) + 1);
1134                 lli->lli_symlink_name = NULL;
1135         }
1136
1137         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1138                 LASSERT(lli->lli_posix_acl == NULL);
1139                 if (lli->lli_remote_perms) {
1140                         free_rmtperm_hash(lli->lli_remote_perms);
1141                         lli->lli_remote_perms = NULL;
1142                 }
1143         }
1144 #ifdef CONFIG_FS_POSIX_ACL
1145         else if (lli->lli_posix_acl) {
1146                 LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
1147                 LASSERT(lli->lli_remote_perms == NULL);
1148                 posix_acl_release(lli->lli_posix_acl);
1149                 lli->lli_posix_acl = NULL;
1150         }
1151 #endif
1152         lli->lli_inode_magic = LLI_INODE_DEAD;
1153
1154 #ifdef HAVE_EXPORT___IGET
1155         spin_lock(&sbi->ll_deathrow_lock);
1156         list_del_init(&lli->lli_dead_list);
1157         spin_unlock(&sbi->ll_deathrow_lock);
1158 #endif
1159         ll_clear_inode_capas(inode);
1160
1161         EXIT;
1162 }
1163
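/* Send a setattr RPC to the MDS and apply the returned attributes (and any
 * opened IO epoch) to the local inode. */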
1164 int ll_md_setattr(struct inode *inode, struct md_op_data *op_data,
1165                   struct md_open_data **mod)
1166 {
1167         struct lustre_md md;
1168         struct ll_sb_info *sbi = ll_i2sbi(inode);
1169         struct ptlrpc_request *request = NULL;
1170         int rc;
1171         ENTRY;
1172         
1173         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0, 
1174                                      LUSTRE_OPC_ANY, NULL);
1175         if (IS_ERR(op_data))
1176                 RETURN(PTR_ERR(op_data));
1177
1178         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0, 
1179                         &request, mod);
1180         if (rc) {
1181                 ptlrpc_req_finished(request);
1182                 if (rc == -ENOENT) {
1183                         inode->i_nlink = 0;
1184                         /* Unlinked special device node? Or just a race?
 1185                          * Pretend we did everything. */
1186                         if (!S_ISREG(inode->i_mode) &&
1187                             !S_ISDIR(inode->i_mode))
1188                                 rc = inode_setattr(inode, &op_data->op_attr);
1189                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1190                         CERROR("md_setattr fails: rc = %d\n", rc);
1191                 }
1192                 RETURN(rc);
1193         }
1194
1195         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1196                               sbi->ll_md_exp, &md);
1197         if (rc) {
1198                 ptlrpc_req_finished(request);
1199                 RETURN(rc);
1200         }
1201
1202         /* We call inode_setattr to adjust timestamps.
 1203          * If there is at least some data in the file, we cleared ATTR_SIZE
 1204          * above to avoid invoking vmtruncate; otherwise it is important
1205          * to call vmtruncate in inode_setattr to update inode->i_size
1206          * (bug 6196) */
1207         rc = inode_setattr(inode, &op_data->op_attr);
1208
1209         /* Extract epoch data if obtained. */
1210         op_data->op_handle = md.body->handle;
1211         op_data->op_ioepoch = md.body->ioepoch;
1212
1213         ll_update_inode(inode, &md);
1214         ptlrpc_req_finished(request);
1215
1216         RETURN(rc);
1217 }
1218
1219 /* Close IO epoch and send Size-on-MDS attribute update. */
1220 static int ll_setattr_done_writing(struct inode *inode,
1221                                    struct md_op_data *op_data,
1222                                    struct md_open_data *mod)
1223 {
1224         struct ll_inode_info *lli = ll_i2info(inode);
1225         int rc = 0;
1226         ENTRY;
1227         
1228         LASSERT(op_data != NULL);
1229         if (!S_ISREG(inode->i_mode))
1230                 RETURN(0);
1231
1232         CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID" for truncate\n",
1233                op_data->op_ioepoch, PFID(&lli->lli_fid));
1234
1235         op_data->op_flags = MF_EPOCH_CLOSE | MF_SOM_CHANGE;
1236         rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1237         if (rc == -EAGAIN) {
 1238                 /* MDS has instructed us to obtain the Size-on-MDS attribute
 1239                  * from the OSTs and send a setattr back to the MDS. */
1240                 rc = ll_sizeonmds_update(inode, mod, &op_data->op_handle,
1241                                          op_data->op_ioepoch);
1242         } else if (rc) {
1243                 CERROR("inode %lu mdc truncate failed: rc = %d\n",
1244                        inode->i_ino, rc);
1245         }
1246         RETURN(rc);
1247 }
1248
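/* Truncate the file to new_size under an OST extent lock, or lockless on the
 * server side when OBD_CONNECT_TRUNCLOCK is negotiated and lockless truncate
 * is enabled, then call vmtruncate() to update the local i_size and page
 * cache. */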
1249 static int ll_setattr_do_truncate(struct inode *inode, loff_t new_size)
1250 {
1251         struct ll_sb_info *sbi = ll_i2sbi(inode);
1252         struct ll_inode_info *lli = ll_i2info(inode);
1253         struct lov_stripe_md *lsm = lli->lli_smd;
1254         int rc;
1255         ldlm_policy_data_t policy = { .l_extent = {new_size,
1256                                                    OBD_OBJECT_EOF } };
1257         struct lustre_handle lockh = { 0 };
1258         int local_lock = 0; /* 0 - no local lock;
1259                              * 1 - lock taken by lock_extent;
1260                              * 2 - by obd_match*/
1261         int ast_flags;
1262         int err;
1263         ENTRY;
1264
1265         UNLOCK_INODE_MUTEX(inode);
1266         UP_WRITE_I_ALLOC_SEM(inode);
1267
1268         if (sbi->ll_lockless_truncate_enable &&
1269             (sbi->ll_lco.lco_flags & OBD_CONNECT_TRUNCLOCK)) {
1270                 ast_flags = LDLM_FL_BLOCK_GRANTED;
1271                 rc = obd_match(sbi->ll_dt_exp, lsm, LDLM_EXTENT,
1272                                &policy, LCK_PW, &ast_flags, inode, &lockh);
1273                 if (rc > 0) {
1274                         local_lock = 2;
1275                         rc = 0;
1276                 } else if (rc == 0) {
1277                         rc = ll_file_punch(inode, new_size, 1);
1278                 }
1279         } else {
1280                 /* XXX when we fix the AST intents to pass the discard-range
1281                  * XXX extent, make ast_flags always LDLM_AST_DISCARD_DATA
1282                  * XXX here. */
1283                 ast_flags = (new_size == 0) ? LDLM_AST_DISCARD_DATA : 0;
1284                 rc = ll_extent_lock(NULL, inode, lsm, LCK_PW, &policy,
1285                                     &lockh, ast_flags);
1286                 if (likely(rc == 0))
1287                         local_lock = 1;
1288         }
1289
1290 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1291         DOWN_WRITE_I_ALLOC_SEM(inode);
1292         LOCK_INODE_MUTEX(inode);
1293 #else
1294         LOCK_INODE_MUTEX(inode);
1295         DOWN_WRITE_I_ALLOC_SEM(inode);
1296 #endif
1297         if (likely(rc == 0)) {
1298                 /* Only ll_inode_size_lock is taken at this level.
1299                  * lov_stripe_lock() is grabbed by ll_truncate() only over
1300                  * call to obd_adjust_kms().  If vmtruncate returns 0, then
1301                  * ll_truncate dropped ll_inode_size_lock() */
1302                 ll_inode_size_lock(inode, 0);
1303                 if (!local_lock) {
1304                         spin_lock(&lli->lli_lock);
1305                         lli->lli_flags |= LLIF_SRVLOCK;
1306                         spin_unlock(&lli->lli_lock);
1307                 }
1308                 rc = vmtruncate(inode, new_size);
1309                 if (!local_lock) {
1310                         spin_lock(&lli->lli_lock);
1311                         lli->lli_flags &= ~LLIF_SRVLOCK;
1312                         spin_unlock(&lli->lli_lock);
1313                 }
1314                 if (rc != 0) {
1315                         LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
1316                         ll_inode_size_unlock(inode, 0);
1317                 }
1318         }
1319
1320         if (local_lock) {
1321                 if (local_lock == 2)
1322                         err = obd_cancel(sbi->ll_dt_exp, lsm, LCK_PW, &lockh);
1323                 else
1324                         err = ll_extent_unlock(NULL, inode, lsm, LCK_PW, &lockh);
1325                 if (unlikely(err != 0)){
1326                         CERROR("extent unlock failed: err=%d,"
1327                                " unlock method =%d\n", err, local_lock);
1328                         if (rc == 0)
1329                                 rc = err;
1330                 }
1331         }
1332         RETURN(rc);
1333 }
1334
1335 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1336  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1337  * keep these values until such a time that objects are allocated for it.
1338  * We do the MDS operations first, as it is checking permissions for us.
 1339  * We don't do the MDS RPC if there is nothing that we want to store there;
 1340  * otherwise there is no harm in updating mtime/atime on the MDS if we are
 1341  * going to do an RPC anyway.
1342  *
1343  * If we are doing a truncate, we will send the mtime and ctime updates
1344  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1345  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1346  * at the same time.
1347  */
1348 int ll_setattr_raw(struct inode *inode, struct iattr *attr)
1349 {
1350         struct ll_inode_info *lli = ll_i2info(inode);
1351         struct lov_stripe_md *lsm = lli->lli_smd;
1352         struct ll_sb_info *sbi = ll_i2sbi(inode);
1353         struct md_op_data *op_data = NULL;
1354         struct md_open_data *mod = NULL;
1355         int ia_valid = attr->ia_valid;
1356         int rc = 0, rc1 = 0;
1357         ENTRY;
1358
1359         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu valid %x\n", inode->i_ino,
1360                attr->ia_valid);
1361         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETATTR, 1);
1362
1363         if (ia_valid & ATTR_SIZE) {
1364                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1365                         CDEBUG(D_INODE, "file too large %llu > "LPU64"\n",
1366                                attr->ia_size, ll_file_maxbytes(inode));
1367                         RETURN(-EFBIG);
1368                 }
1369
1370                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1371         }
1372
1373         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1374         if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
1375                 if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
1376                         RETURN(-EPERM);
1377         }
1378
1379         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1380         if (attr->ia_valid & ATTR_CTIME) {
1381                 attr->ia_ctime = CURRENT_TIME;
1382                 attr->ia_valid |= ATTR_CTIME_SET;
1383         }
1384         if (!(ia_valid & ATTR_ATIME_SET) && (attr->ia_valid & ATTR_ATIME)) {
1385                 attr->ia_atime = CURRENT_TIME;
1386                 attr->ia_valid |= ATTR_ATIME_SET;
1387         }
1388         if (!(ia_valid & ATTR_MTIME_SET) && (attr->ia_valid & ATTR_MTIME)) {
1389                 attr->ia_mtime = CURRENT_TIME;
1390                 attr->ia_valid |= ATTR_MTIME_SET;
1391         }
1392         if ((attr->ia_valid & ATTR_CTIME) && !(attr->ia_valid & ATTR_MTIME)) {
 1393                 /* To avoid a stale mtime on the MDS, obtain it from the OST
 1394                    and send it to the MDS. */
1395                 rc = ll_glimpse_size(inode, 0);
1396                 if (rc) 
1397                         RETURN(rc);
1398                 
1399                 attr->ia_valid |= ATTR_MTIME_SET | ATTR_MTIME;
1400                 attr->ia_mtime = inode->i_mtime;
1401         }
1402
1403         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1404                 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
1405                        LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1406                        CURRENT_SECONDS);
1407
1408         /* NB: ATTR_SIZE will only be set after this point if the size
1409          * resides on the MDS, ie, this file has no objects. */
1410         if (lsm)
1411                 attr->ia_valid &= ~ATTR_SIZE;
1412
1413         /* We always do an MDS RPC, even if we're only changing the size;
 1414          * only the MDS knows whether truncate() should fail with -ETXTBSY */
1415
1416         OBD_ALLOC_PTR(op_data);
1417         if (op_data == NULL)
1418                 RETURN(-ENOMEM);
1419
1420         memcpy(&op_data->op_attr, attr, sizeof(*attr));
1421
1422         /* Open epoch for truncate. */
1423         if ((ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) &&
1424             (ia_valid & ATTR_SIZE))
1425                 op_data->op_flags = MF_EPOCH_OPEN;
1426
1427         rc = ll_md_setattr(inode, op_data, &mod);
1428         if (rc)
1429                 GOTO(out, rc);
1430
1431         if (op_data->op_ioepoch)
1432                 CDEBUG(D_INODE, "Epoch "LPU64" opened on "DFID" for "
1433                        "truncate\n", op_data->op_ioepoch, PFID(&lli->lli_fid));
1434
1435         if (!lsm || !S_ISREG(inode->i_mode)) {
1436                 CDEBUG(D_INODE, "no lsm: not setting attrs on OST\n");
1437                 GOTO(out, rc = 0);
1438         }
1439
1440         /* We really need to get our PW lock before we change inode->i_size.
1441          * If we don't, we can race with other i_size updaters on our node,
1442          * like ll_file_read.  We can also race with i_size propagation to
1443          * other nodes through dirtying and writeback of final cached pages.
1444          * This last one is especially bad for racing O_APPEND users on other
1445          * nodes. */
1446         if (ia_valid & ATTR_SIZE) {
1447                 rc = ll_setattr_do_truncate(inode, attr->ia_size);
1448         } else if (ia_valid & (ATTR_MTIME | ATTR_MTIME_SET)) {
1449                 obd_flag flags;
1450                 struct obd_info oinfo = { { { 0 } } };
1451                 struct obdo *oa;
1452
1453                 CDEBUG(D_INODE, "set mtime on OST inode %lu to %lu\n",
1454                        inode->i_ino, LTIME_S(attr->ia_mtime));
1455
1456                 OBDO_ALLOC(oa);
1457                 if (oa) {
1458                         oa->o_id = lsm->lsm_object_id;
1459                         oa->o_gr = lsm->lsm_object_gr;
1460                         oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
1461
1462                         flags = OBD_MD_FLTYPE | OBD_MD_FLATIME |
1463                                 OBD_MD_FLMTIME | OBD_MD_FLCTIME |
1464                                 OBD_MD_FLFID | OBD_MD_FLGENER | 
1465                                 OBD_MD_FLGROUP;
1466
1467                         obdo_from_inode(oa, inode, flags);
1468
1469                         oinfo.oi_oa = oa;
1470                         oinfo.oi_md = lsm;
1471                         oinfo.oi_capa = ll_mdscapa_get(inode);
1472
1473                         /* XXX: this looks unnecessary now. */
1474                         rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1475                         capa_put(oinfo.oi_capa);
1476                         if (rc)
1477                                 CERROR("obd_setattr_rqset fails: rc=%d\n", rc);
1478                         OBDO_FREE(oa);
1479                 } else {
1480                         rc = -ENOMEM;
1481                 }
1482         }
1483         EXIT;
1484 out:
1485         if (op_data) {
1486                 if (op_data->op_ioepoch)
1487                         rc1 = ll_setattr_done_writing(inode, op_data, mod);
1488                 ll_finish_md_op_data(op_data);
1489         }
1490         return rc ? rc : rc1;
1491 }
1492
1493 int ll_setattr(struct dentry *de, struct iattr *attr)
1494 {
1495         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1496             (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1497                 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1498
1499         return ll_setattr_raw(de->d_inode, attr);
1500 }
1501
1502 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1503                        __u64 max_age, __u32 flags)
1504 {
1505         struct ll_sb_info *sbi = ll_s2sbi(sb);
1506         struct obd_statfs obd_osfs;
1507         int rc;
1508         ENTRY;
1509
1510         rc = obd_statfs(class_exp2obd(sbi->ll_md_exp), osfs, max_age, flags);
1511         if (rc) {
1512                 CERROR("md_statfs fails: rc = %d\n", rc);
1513                 RETURN(rc);
1514         }
1515
1516         osfs->os_type = sb->s_magic;
1517
1518         CDEBUG(D_SUPER, "MDC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1519                osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,osfs->os_files);
1520
1521         rc = obd_statfs_rqset(class_exp2obd(sbi->ll_dt_exp),
1522                               &obd_osfs, max_age, flags);
1523         if (rc) {
1524                 CERROR("obd_statfs fails: rc = %d\n", rc);
1525                 RETURN(rc);
1526         }
1527
1528         CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1529                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1530                obd_osfs.os_files);
1531
1532         osfs->os_bsize = obd_osfs.os_bsize;
1533         osfs->os_blocks = obd_osfs.os_blocks;
1534         osfs->os_bfree = obd_osfs.os_bfree;
1535         osfs->os_bavail = obd_osfs.os_bavail;
1536
1537         /* If we don't have as many objects free on the OST as inodes
1538          * on the MDS, we reduce the total number of inodes to
1539          * compensate, so that the "inodes in use" number is correct.
1540          */
1541         if (obd_osfs.os_ffree < osfs->os_ffree) {
1542                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1543                         obd_osfs.os_ffree;
1544                 osfs->os_ffree = obd_osfs.os_ffree;
1545         }
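        /*
         * Worked example (illustrative numbers only): if the MDS reports
         * os_files = 1000 and os_ffree = 400 (600 inodes in use) but the OSTs
         * have only 100 objects free, the client reports
         * os_files = (1000 - 400) + 100 = 700 and os_ffree = 100, so tools
         * like "df -i" still show 600 inodes in use.
         */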
1546
1547         RETURN(rc);
1548 }
1549 #ifndef HAVE_STATFS_DENTRY_PARAM
1550 int ll_statfs(struct super_block *sb, struct kstatfs *sfs)
1551 {
1552 #else
1553 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1554 {
1555         struct super_block *sb = de->d_sb;
1556 #endif
1557         struct obd_statfs osfs;
1558         int rc;
1559
1560         CDEBUG(D_VFSTRACE, "VFS Op: at "LPU64" jiffies\n", get_jiffies_64());
1561         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1562
1563         /* For now we will always get up-to-date statfs values, but in the
1564          * future we may allow some amount of caching on the client (e.g.
1565          * from QOS or lprocfs updates). */
1566         rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - 1, 0);
1567         if (rc)
1568                 return rc;
1569
1570         statfs_unpack(sfs, &osfs);
1571
1572         /* We need to downshift for all 32-bit kernels, because we can't
1573          * tell if the kernel is being called via sys_statfs64() or not.
1574          * Stop before f_bsize overflows; at that point it is better to
1575          * just risk EOVERFLOW if the caller is using the old sys_statfs(). */
1576         if (sizeof(long) < 8) {
1577                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1578                         sfs->f_bsize <<= 1;
1579
1580                         osfs.os_blocks >>= 1;
1581                         osfs.os_bfree >>= 1;
1582                         osfs.os_bavail >>= 1;
1583                 }
1584         }
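        /*
         * Worked example (illustrative, and assuming statfs_unpack() copied
         * os_bsize into f_bsize): on a 32-bit kernel with f_bsize = 4096 and
         * osfs.os_blocks = 2^33, two passes of the loop above leave
         * f_bsize = 16384 and os_blocks = 2^31, which fits the 32-bit block
         * counts while preserving the reported capacity.
         */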
1585
1586         sfs->f_blocks = osfs.os_blocks;
1587         sfs->f_bfree = osfs.os_bfree;
1588         sfs->f_bavail = osfs.os_bavail;
1589
1590         return 0;
1591 }
1592
1593 void ll_inode_size_lock(struct inode *inode, int lock_lsm)
1594 {
1595         struct ll_inode_info *lli;
1596         struct lov_stripe_md *lsm;
1597
1598         lli = ll_i2info(inode);
1599         LASSERT(lli->lli_size_sem_owner != current);
1600         down(&lli->lli_size_sem);
1601         LASSERT(lli->lli_size_sem_owner == NULL);
1602         lli->lli_size_sem_owner = current;
1603         lsm = lli->lli_smd;
1604         LASSERTF(lsm != NULL || lock_lsm == 0, "lsm %p, lock_lsm %d\n",
1605                  lsm, lock_lsm);
1606         if (lock_lsm)
1607                 lov_stripe_lock(lsm);
1608 }
1609
1610 void ll_inode_size_unlock(struct inode *inode, int unlock_lsm)
1611 {
1612         struct ll_inode_info *lli;
1613         struct lov_stripe_md *lsm;
1614
1615         lli = ll_i2info(inode);
1616         lsm = lli->lli_smd;
1617         LASSERTF(lsm != NULL || unlock_lsm == 0, "lsm %p, unlock_lsm %d\n",
1618                  lsm, unlock_lsm);
1619         if (unlock_lsm)
1620                 lov_stripe_unlock(lsm);
1621         LASSERT(lli->lli_size_sem_owner == current);
1622         lli->lli_size_sem_owner = NULL;
1623         up(&lli->lli_size_sem);
1624 }
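/*
 * Hedged usage sketch (illustrative only, not part of the original file):
 * i_size updaters are expected to bracket the update with the pair above,
 * taking the lov stripe lock as well whenever an lsm is attached, e.g.
 *
 *      ll_inode_size_lock(inode, 1);
 *      inode->i_size = new_size;        (hypothetical update)
 *      ll_inode_size_unlock(inode, 1);
 *
 * Callers that have no lsm must pass 0 for lock_lsm/unlock_lsm, as the
 * LASSERTFs above enforce.
 */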
1625
1626 static void ll_replace_lsm(struct inode *inode, struct lov_stripe_md *lsm)
1627 {
1628         struct ll_inode_info *lli = ll_i2info(inode);
1629
1630         dump_lsm(D_INODE, lsm);
1631         dump_lsm(D_INODE, lli->lli_smd);
1632         LASSERTF(lsm->lsm_magic == LOV_MAGIC_JOIN,
1633                  "lsm must be joined lsm %p\n", lsm);
1634         obd_free_memmd(ll_i2dtexp(inode), &lli->lli_smd);
1635         CDEBUG(D_INODE, "replace lli_smd %p with lsm %p, inode %lu/%u(%p)\n",
1636                lli->lli_smd, lsm, inode->i_ino, inode->i_generation, inode);
1637         lli->lli_smd = lsm;
1638         lli->lli_maxbytes = lsm->lsm_maxbytes;
1639         if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
1640                 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
1641 }
1642
1643 void ll_update_inode(struct inode *inode, struct lustre_md *md)
1644 {
1645         struct ll_inode_info *lli = ll_i2info(inode);
1646         struct mdt_body *body = md->body;
1647         struct lov_stripe_md *lsm = md->lsm;
1648         struct ll_sb_info *sbi = ll_i2sbi(inode);
1649
1650         LASSERT ((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
1651         if (lsm != NULL) {
1652                 if (lli->lli_smd == NULL) {
1653                         if (lsm->lsm_magic != LOV_MAGIC &&
1654                             lsm->lsm_magic != LOV_MAGIC_JOIN) {
1655                                 dump_lsm(D_ERROR, lsm);
1656                                 LBUG();
1657                         }
1658                         CDEBUG(D_INODE, "adding lsm %p to inode %lu/%u(%p)\n",
1659                                lsm, inode->i_ino, inode->i_generation, inode);
1660                         /* ll_inode_size_lock() requires that it only be
1661                          * called with lli_smd != NULL or lock_lsm == 0, or
1662                          * we can race between lock/unlock.  bug 9547 */
1663                         lli->lli_smd = lsm;
1664                         lli->lli_maxbytes = lsm->lsm_maxbytes;
1665                         if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
1666                                 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
1667                 } else {
1668                         if (lli->lli_smd->lsm_magic == lsm->lsm_magic &&
1669                              lli->lli_smd->lsm_stripe_count ==
1670                                         lsm->lsm_stripe_count) {
1671                                 if (lov_stripe_md_cmp(lli->lli_smd, lsm)) {
1672                                         CERROR("lsm mismatch for inode %ld\n",
1673                                                 inode->i_ino);
1674                                         CERROR("lli_smd:\n");
1675                                         dump_lsm(D_ERROR, lli->lli_smd);
1676                                         CERROR("lsm:\n");
1677                                         dump_lsm(D_ERROR, lsm);
1678                                         LBUG();
1679                                 }
1680                         } else
1681                                 ll_replace_lsm(inode, lsm);
1682                 }
1683                 if (lli->lli_smd != lsm)
1684                         obd_free_memmd(ll_i2dtexp(inode), &lsm);
1685         }
1686
1687         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1688                 if (body->valid & OBD_MD_FLRMTPERM)
1689                         ll_update_remote_perm(inode, md->remote_perm);
1690         }
1691 #ifdef CONFIG_FS_POSIX_ACL
1692         else if (body->valid & OBD_MD_FLACL) {
1693                 spin_lock(&lli->lli_lock);
1694                 if (lli->lli_posix_acl)
1695                         posix_acl_release(lli->lli_posix_acl);
1696                 lli->lli_posix_acl = md->posix_acl;
1697                 spin_unlock(&lli->lli_lock);
1698         }
1699 #endif
1700         if (body->valid & OBD_MD_FLATIME &&
1701             body->atime > LTIME_S(inode->i_atime))
1702                 LTIME_S(inode->i_atime) = body->atime;
1703
1704         /* mtime is always updated together with ctime, but it can also be
1705            set into the past.  A write and a utime(2) may happen within the
1706            same second, and utime's mtime takes priority over the write's,
1707            so take the mtime from the MDS when the ctimes are equal. */
1708         if (body->valid & OBD_MD_FLCTIME &&
1709             body->ctime >= LTIME_S(inode->i_ctime)) {
1710                 LTIME_S(inode->i_ctime) = body->ctime;
1711                 if (body->valid & OBD_MD_FLMTIME) {
1712                         CDEBUG(D_INODE, "setting ino %lu mtime "
1713                                "from %lu to "LPU64"\n", inode->i_ino, 
1714                                LTIME_S(inode->i_mtime), body->mtime);
1715                         LTIME_S(inode->i_mtime) = body->mtime;
1716                 }
1717         }
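        /*
         * Illustrative example (not from the original source): if a write()
         * sets mtime = ctime = T locally and a utime(2) in the same second
         * sets mtime = T - 100 (a past value) with ctime = T on the MDS, the
         * check above sees equal ctimes and takes the MDS mtime (T - 100)
         * rather than keeping the locally newer value.
         */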
1718         if (body->valid & OBD_MD_FLMODE)
1719                 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
1720         if (body->valid & OBD_MD_FLTYPE)
1721                 inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
1722         if (S_ISREG(inode->i_mode)) {
1723                 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
1724         } else {
1725                 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1726         }
1727 #ifdef HAVE_INODE_BLKSIZE
1728         inode->i_blksize = 1<<inode->i_blkbits;
1729 #endif
1730         if (body->valid & OBD_MD_FLUID)
1731                 inode->i_uid = body->uid;
1732         if (body->valid & OBD_MD_FLGID)
1733                 inode->i_gid = body->gid;
1734         if (body->valid & OBD_MD_FLFLAGS)
1735                 inode->i_flags = ll_ext_to_inode_flags(body->flags);
1736         if (body->valid & OBD_MD_FLNLINK)
1737                 inode->i_nlink = body->nlink;
1738         if (body->valid & OBD_MD_FLRDEV)
1739                 inode->i_rdev = old_decode_dev(body->rdev);
1740
1741         if (body->valid & OBD_MD_FLID) {
1742                 /* FID shouldn't be changed! */
1743                 if (fid_is_sane(&lli->lli_fid)) {
1744                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
1745                                  "Trying to change FID "DFID
1746                                  " to the "DFID", inode %lu/%u(%p)\n",
1747                                  PFID(&lli->lli_fid), PFID(&body->fid1),
1748                                  inode->i_ino, inode->i_generation, inode);
1749                 } else 
1750                         lli->lli_fid = body->fid1;
1751         }
1752
1753         LASSERT(fid_seq(&lli->lli_fid) != 0);
1754
1755         if (body->valid & OBD_MD_FLSIZE) {
1756                 if ((ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) &&
1757                     S_ISREG(inode->i_mode) && lli->lli_smd) {
1758                         struct lustre_handle lockh;
1759                         ldlm_mode_t mode;
1760
1761                         /* As a blocking AST may have been processed by
1762                          * this time, we need to check that there is an
1763                          * UPDATE lock on the client and set
1764                          * LLIF_MDS_SIZE_LOCK while holding it. */
1765                         mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1766                                                &lockh);
1767                         if (mode) {
1768                                 if (lli->lli_flags & (LLIF_DONE_WRITING |
1769                                                       LLIF_EPOCH_PENDING |
1770                                                       LLIF_SOM_DIRTY)) {
1771                                         CERROR("ino %lu flags %lu still has "
1772                                                "size authority! do not trust "
1773                                                "the size from the MDS\n",
1774                                                inode->i_ino, lli->lli_flags);
1775                                 } else {
1776                                         /* Use old size assignment to avoid
1777                                          * deadlock bz14138 & bz14326 */
1778                                         inode->i_size = body->size;
1779                                         lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1780                                 }
1781                                 ldlm_lock_decref(&lockh, mode);
1782                         }
1783                 } else {
1784                         /* Use old size assignment to avoid
1785                          * deadlock bz14138 & bz14326 */
1786                         inode->i_size = body->size;
1787                 }
1788
1789                 if (body->valid & OBD_MD_FLBLOCKS)
1790                         inode->i_blocks = body->blocks;
1791         }
1792
1793         if (body->valid & OBD_MD_FLMDSCAPA) {
1794                 LASSERT(md->mds_capa);
1795                 ll_add_capa(inode, md->mds_capa);
1796         }
1797         if (body->valid & OBD_MD_FLOSSCAPA) {
1798                 LASSERT(md->oss_capa);
1799                 ll_add_capa(inode, md->oss_capa);
1800         }
1801 }
1802
1803 static struct backing_dev_info ll_backing_dev_info = {
1804         .ra_pages       = 0,    /* No readahead */
1805 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
1806         .capabilities   = 0,    /* Does contribute to dirty memory */
1807 #else
1808         .memory_backed  = 0,    /* Does contribute to dirty memory */
1809 #endif
1810 };
1811
1812 void ll_read_inode2(struct inode *inode, void *opaque)
1813 {
1814         struct lustre_md *md = opaque;
1815         struct ll_inode_info *lli = ll_i2info(inode);
1816         ENTRY;
1817
1818         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
1819                inode->i_ino, inode->i_generation, inode);
1820
1821         ll_lli_init(lli);
1822
1823         LASSERT(!lli->lli_smd);
1824
1825         /* Core attributes from the MDS first.  This is a new inode, and
1826          * the VFS doesn't zero times in the core inode so we have to do
1827          * it ourselves.  They will be overwritten by either MDS or OST
1828          * attributes - we just need to make sure they aren't newer. */
1829         LTIME_S(inode->i_mtime) = 0;
1830         LTIME_S(inode->i_atime) = 0;
1831         LTIME_S(inode->i_ctime) = 0;
1832         inode->i_rdev = 0;
1833         ll_update_inode(inode, md);
1834
1835         /* OIDEBUG(inode); */
1836
1837         if (S_ISREG(inode->i_mode)) {
1838                 struct ll_sb_info *sbi = ll_i2sbi(inode);
1839                 inode->i_op = &ll_file_inode_operations;
1840                 inode->i_fop = sbi->ll_fop;
1841                 inode->i_mapping->a_ops = &ll_aops;
1842                 EXIT;
1843         } else if (S_ISDIR(inode->i_mode)) {
1844                 inode->i_op = &ll_dir_inode_operations;
1845                 inode->i_fop = &ll_dir_operations;
1846                 inode->i_mapping->a_ops = &ll_dir_aops;
1847                 EXIT;
1848         } else if (S_ISLNK(inode->i_mode)) {
1849                 inode->i_op = &ll_fast_symlink_inode_operations;
1850                 EXIT;
1851         } else {
1852                 inode->i_op = &ll_special_inode_operations;
1853
1854                 init_special_inode(inode, inode->i_mode,
1855                                    kdev_t_to_nr(inode->i_rdev));
1856
1857                 /* initializing backing dev info. */
1858                 inode->i_mapping->backing_dev_info = &ll_backing_dev_info;
1859
1860                 EXIT;
1861         }
1862 }
1863
1864 void ll_delete_inode(struct inode *inode)
1865 {
1866         struct ll_sb_info *sbi = ll_i2sbi(inode);
1867         int rc;
1868         ENTRY;
1869
1870         rc = obd_fid_delete(sbi->ll_md_exp, ll_inode2fid(inode));
1871         if (rc) {
1872                 CERROR("fid_delete() failed, rc %d\n", rc);
1873         }
1874         truncate_inode_pages(&inode->i_data, 0);
1875         clear_inode(inode);
1876
1877         EXIT;
1878 }
1879
1880 int ll_iocontrol(struct inode *inode, struct file *file,
1881                  unsigned int cmd, unsigned long arg)
1882 {
1883         struct ll_sb_info *sbi = ll_i2sbi(inode);
1884         struct ptlrpc_request *req = NULL;
1885         int rc, flags = 0;
1886         ENTRY;
1887
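        /*
         * Hedged usage sketch (userspace side, not part of this file): these
         * ioctls are driven the same way as their ext3 counterparts, e.g.
         *
         *      int flags;
         *      ioctl(fd, EXT3_IOC_GETFLAGS, &flags);
         *      flags |= EXT3_APPEND_FL;   (illustrative flag choice)
         *      ioctl(fd, EXT3_IOC_SETFLAGS, &flags);
         *
         * GETFLAGS fetches the flags from the MDS via md_getattr(); SETFLAGS
         * pushes them to the MDS and, when the file has objects, to the OSTs.
         */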
1888         switch(cmd) {
1889         case EXT3_IOC_GETFLAGS: {
1890                 struct mdt_body *body;
1891                 struct obd_capa *oc;
1892
1893                 oc = ll_mdscapa_get(inode);
1894                 rc = md_getattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
1895                                 OBD_MD_FLFLAGS, 0, &req);
1896                 capa_put(oc);
1897                 if (rc) {
1898                         CERROR("failure %d inode %lu\n", rc, inode->i_ino);
1899                         RETURN(-abs(rc));
1900                 }
1901
1902                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1903
1904                 flags = body->flags;
1905
1906                 ptlrpc_req_finished(req);
1907
1908                 RETURN(put_user(flags, (int *)arg));
1909         }
1910         case EXT3_IOC_SETFLAGS: {
1911                 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1912                 struct obd_info oinfo = { { { 0 } } };
1913                 struct md_op_data *op_data;
1914
1915                 if (get_user(flags, (int *)arg))
1916                         RETURN(-EFAULT);
1917
1918                 oinfo.oi_md = lsm;
1919                 OBDO_ALLOC(oinfo.oi_oa);
1920                 if (!oinfo.oi_oa)
1921                         RETURN(-ENOMEM);
1922
1923                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1924                                              LUSTRE_OPC_ANY, NULL);
1925                 if (IS_ERR(op_data))
1926                         RETURN(PTR_ERR(op_data));
1927
1928                 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
1929                 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1930                 rc = md_setattr(sbi->ll_md_exp, op_data,
1931                                 NULL, 0, NULL, 0, &req, NULL);
1932                 ll_finish_md_op_data(op_data);
1933                 ptlrpc_req_finished(req);
1934                 if (rc || lsm == NULL) {
1935                         OBDO_FREE(oinfo.oi_oa);
1936                         RETURN(rc);
1937                 }
1938
1939                 oinfo.oi_oa->o_id = lsm->lsm_object_id;
1940                 oinfo.oi_oa->o_gr = lsm->lsm_object_gr;
1941                 oinfo.oi_oa->o_flags = flags;
1942                 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS | 
1943                                        OBD_MD_FLGROUP;
1944                 oinfo.oi_capa = ll_mdscapa_get(inode);
1945
1946                 obdo_from_inode(oinfo.oi_oa, inode,
1947                                 OBD_MD_FLFID | OBD_MD_FLGENER);
1948                 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1949                 capa_put(oinfo.oi_capa);
1950                 OBDO_FREE(oinfo.oi_oa);
1951                 if (rc) {
1952                         if (rc != -EPERM && rc != -EACCES)
1953                                 CERROR("obd_setattr_rqset fails: rc = %d\n", rc);
1954                         RETURN(rc);
1955                 }
1956
1957                 inode->i_flags = ll_ext_to_inode_flags(flags |
1958                                                        MDS_BFLAG_EXT_FLAGS);
1959                 RETURN(0);
1960         }
1961         default:
1962                 RETURN(-ENOSYS);
1963         }
1964
1965         RETURN(0);
1966 }
1967
1968 int ll_flush_ctx(struct inode *inode)
1969 {
1970         struct ll_sb_info  *sbi = ll_i2sbi(inode);
1971
1972         CDEBUG(D_SEC, "flush context for user %d\n", current->uid);
1973
1974         obd_set_info_async(sbi->ll_md_exp,
1975                            sizeof(KEY_FLUSH_CTX) - 1, KEY_FLUSH_CTX,
1976                            0, NULL, NULL);
1977         obd_set_info_async(sbi->ll_dt_exp,
1978                            sizeof(KEY_FLUSH_CTX) - 1, KEY_FLUSH_CTX,
1979                            0, NULL, NULL);
1980         return 0;
1981 }
1982
1983 /* umount -f client means force down, don't save state */
1984 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
1985 void ll_umount_begin(struct vfsmount *vfsmnt, int flags)
1986 {
1987         struct super_block *sb = vfsmnt->mnt_sb;
1988 #else
1989 void ll_umount_begin(struct super_block *sb)
1990 {
1991 #endif
1992         struct lustre_sb_info *lsi = s2lsi(sb);
1993         struct ll_sb_info *sbi = ll_s2sbi(sb);
1994         struct obd_device *obd;
1995         struct obd_ioctl_data ioc_data = { 0 };
1996         ENTRY;
1997
1998 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
1999         if (!(flags & MNT_FORCE)) {
2000                 EXIT;
2001                 return;
2002         }
2003 #endif
2004
2005         /* Tell the MGC we got umount -f */
2006         lsi->lsi_flags |= LSI_UMOUNT_FORCE;
2007
2008         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2009                sb->s_count, atomic_read(&sb->s_active));
2010
2011         obd = class_exp2obd(sbi->ll_md_exp);
2012         if (obd == NULL) {
2013                 CERROR("Invalid MDC connection handle "LPX64"\n",
2014                        sbi->ll_md_exp->exp_handle.h_cookie);
2015                 EXIT;
2016                 return;
2017         }
2018         obd->obd_force = 1;
2019         obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp, sizeof ioc_data,
2020                       &ioc_data, NULL);
2021
2022         obd = class_exp2obd(sbi->ll_dt_exp);
2023         if (obd == NULL) {
2024                 CERROR("Invalid LOV connection handle "LPX64"\n",
2025                        sbi->ll_dt_exp->exp_handle.h_cookie);
2026                 EXIT;
2027                 return;
2028         }
2029
2030         obd->obd_force = 1;
2031         obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp, sizeof ioc_data,
2032                       &ioc_data, NULL);
2033
2034         /* Really, we'd like to wait until there are no requests outstanding,
2035          * and then continue.  For now, we just invalidate the requests,
2036          * schedule, and hope.
2037          */
2038         schedule();
2039
2040         EXIT;
2041 }
2042
2043 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2044 {
2045         struct ll_sb_info *sbi = ll_s2sbi(sb);
2046         int err;
2047         __u32 read_only;
2048
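        /* Illustrative note (assumption about the typical caller): a
         * "mount -o remount,ro <mnt>" arrives here with MS_RDONLY newly set
         * in *flags, so the new state is pushed to the MDS via KEY_READ_ONLY
         * before s_flags is updated locally. */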
2049         if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2050                 read_only = *flags & MS_RDONLY;
2051                 err = obd_set_info_async(sbi->ll_md_exp,
2052                                          sizeof(KEY_READ_ONLY) - 1,
2053                                          KEY_READ_ONLY, sizeof(read_only),
2054                                          &read_only, NULL);
2055                 if (err) {
2056                         CERROR("Failed to change the read-only flag during "
2057                                "remount: %d\n", err);
2058                         return err;
2059                 }
2060
2061                 if (read_only)
2062                         sb->s_flags |= MS_RDONLY;
2063                 else
2064                         sb->s_flags &= ~MS_RDONLY;
2065         }
2066         return 0;
2067 }
2068
2069 int ll_prep_inode(struct inode **inode,
2070                   struct ptlrpc_request *req,
2071                   struct super_block *sb)
2072 {
2073         struct ll_sb_info *sbi = NULL;
2074         struct lustre_md md;
2075         int rc = 0;
2076         ENTRY;
2077
2078         LASSERT(*inode || sb);
2079         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2080         prune_deathrow(sbi, 1);
2081         memset(&md, 0, sizeof(struct lustre_md));
2082
2083         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2084                               sbi->ll_md_exp, &md);
2085         if (rc)
2086                 RETURN(rc);
2087
2088         if (*inode) {
2089                 ll_update_inode(*inode, &md);
2090         } else {
2091                 LASSERT(sb != NULL);
2092
2093                 /*
2094                  * At this point the server returns the same fid that the
2095                  * client generated at create time, so using ->fid1 is okay.
2096                  */
2097                 LASSERT(fid_is_sane(&md.body->fid1));
2098
2099                 *inode = ll_iget(sb, ll_fid_build_ino(sbi, &md.body->fid1), &md);
2100                 if (*inode == NULL || is_bad_inode(*inode)) {
2101                         if (md.lsm)
2102                                 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2103 #ifdef CONFIG_FS_POSIX_ACL
2104                         if (md.posix_acl) {
2105                                 posix_acl_release(md.posix_acl);
2106                                 md.posix_acl = NULL;
2107                         }
2108 #endif
2109                         rc = -ENOMEM;
2110                         CERROR("new_inode -fatal: rc %d\n", rc);
2111                         GOTO(out, rc);
2112                 }
2113         }
2114
2115         rc = obd_checkmd(sbi->ll_dt_exp, sbi->ll_md_exp,
2116                          ll_i2info(*inode)->lli_smd);
2117 out:
2118         md_free_lustre_md(sbi->ll_md_exp, &md);
2119         RETURN(rc);
2120 }
2121
2122 char *llap_origins[] = {
2123         [LLAP_ORIGIN_UNKNOWN] = "--",
2124         [LLAP_ORIGIN_READPAGE] = "rp",
2125         [LLAP_ORIGIN_READAHEAD] = "ra",
2126         [LLAP_ORIGIN_COMMIT_WRITE] = "cw",
2127         [LLAP_ORIGIN_WRITEPAGE] = "wp",
2128         [LLAP_ORIGIN_LOCKLESS_IO] = "ls"
2129 };
2130
2131 struct ll_async_page *llite_pglist_next_llap(struct ll_sb_info *sbi,
2132                                              struct list_head *list)
2133 {
2134         struct ll_async_page *llap;
2135         struct list_head *pos;
2136
2137         list_for_each(pos, list) {
2138                 if (pos == &sbi->ll_pglist)
2139                         return NULL;
2140                 llap = list_entry(pos, struct ll_async_page, llap_pglist_item);
2141                 if (llap->llap_page == NULL)
2142                         continue;
2143                 return llap;
2144         }
2145         LBUG();
2146         return NULL;
2147 }
2148
2149 int ll_obd_statfs(struct inode *inode, void *arg)
2150 {
2151         struct ll_sb_info *sbi = NULL;
2152         struct obd_export *exp;
2153         char *buf = NULL;
2154         struct obd_ioctl_data *data = NULL;
2155         __u32 type;
2156         int len = 0, rc;
2157
2158         if (!inode || !(sbi = ll_i2sbi(inode)))
2159                 GOTO(out_statfs, rc = -EINVAL);
2160
2161         rc = obd_ioctl_getdata(&buf, &len, arg);
2162         if (rc)
2163                 GOTO(out_statfs, rc);
2164
2165         data = (void*)buf;
2166         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2167             !data->ioc_pbuf1 || !data->ioc_pbuf2)
2168                 GOTO(out_statfs, rc = -EINVAL);
2169
2170         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2171         if (type == LL_STATFS_MDC)
2172                 exp = sbi->ll_md_exp;
2173         else if (type == LL_STATFS_LOV)
2174                 exp = sbi->ll_dt_exp;
2175         else 
2176                 GOTO(out_statfs, rc = -ENODEV);
2177
2178         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2179         if (rc)
2180                 GOTO(out_statfs, rc);
2181 out_statfs:
2182         if (buf)
2183                 obd_ioctl_freedata(buf, len);
2184         return rc;
2185 }
2186
2187 int ll_process_config(struct lustre_cfg *lcfg)
2188 {
2189         char *ptr;
2190         void *sb;
2191         struct lprocfs_static_vars lvars;
2192         unsigned long x; 
2193         int rc = 0;
2194
2195         lprocfs_llite_init_vars(&lvars);
2196
2197         /* The instance name contains the sb: lustre-client-aacfe000 */
2198         ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2199         if (!ptr || !*(++ptr)) 
2200                 return -EINVAL;
2201         if (sscanf(ptr, "%lx", &x) != 1)
2202                 return -EINVAL;
2203         sb = (void *)x;
2204         /* This better be a real Lustre superblock! */
2205         LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2206
2207         /* Note we have not called client_common_fill_super yet, so the
2208            proc functions must be able to handle that! */
2209         rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2210                                       lcfg, sb);
2211         return(rc);
2212 }
2213
2214 /* This function prepares the md_op_data hint for passing down to MD stack. */
2215 struct md_op_data * ll_prep_md_op_data(struct md_op_data *op_data,
2216                                        struct inode *i1, struct inode *i2,
2217                                        const char *name, int namelen,
2218                                        int mode, __u32 opc, void *data)
2219 {
2220         LASSERT(i1 != NULL);
2221
2222         if (namelen > ll_i2sbi(i1)->ll_namelen)
2223                 return ERR_PTR(-ENAMETOOLONG);
2224         
2225         if (op_data == NULL)
2226                 OBD_ALLOC_PTR(op_data);
2227         
2228         if (op_data == NULL)
2229                 return ERR_PTR(-ENOMEM);
2230
2231         ll_i2gids(op_data->op_suppgids, i1, i2);
2232         op_data->op_fid1 = *ll_inode2fid(i1);
2233         op_data->op_capa1 = ll_mdscapa_get(i1);
2234
2235         if (i2) {
2236                 op_data->op_fid2 = *ll_inode2fid(i2);
2237                 op_data->op_capa2 = ll_mdscapa_get(i2);
2238         } else {
2239                 fid_zero(&op_data->op_fid2);
2240         }
2241
2242         op_data->op_name = name;
2243         op_data->op_namelen = namelen;
2244         op_data->op_mode = mode;
2245         op_data->op_mod_time = CURRENT_SECONDS;
2246         op_data->op_fsuid = current->fsuid;
2247         op_data->op_fsgid = current->fsgid;
2248         op_data->op_cap = current->cap_effective;
2249         op_data->op_bias = MDS_CHECK_SPLIT;
2250         op_data->op_opc = opc;
2251         op_data->op_mds = 0;
2252         op_data->op_data = data;
2253
2254         return op_data;
2255 }
2256
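/*
 * Hedged usage sketch (illustrative only): most callers pass op_data == NULL
 * so that ll_prep_md_op_data() allocates it, and then release it with
 * ll_finish_md_op_data() once the MD request has completed, e.g.
 *
 *      op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *                                   LUSTRE_OPC_ANY, NULL);
 *      if (IS_ERR(op_data))
 *              RETURN(PTR_ERR(op_data));
 *      rc = md_setattr(...);            (hypothetical MD call, args elided)
 *      ll_finish_md_op_data(op_data);
 *
 * See the EXT3_IOC_SETFLAGS handler in ll_iocontrol() above for a concrete
 * in-tree example of this pattern.
 */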
2257 void ll_finish_md_op_data(struct md_op_data *op_data)
2258 {
2259         capa_put(op_data->op_capa1);
2260         capa_put(op_data->op_capa2);
2261         OBD_FREE_PTR(op_data);
2262 }