[fs/lustre-release.git] / lustre / llite / llite_lib.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Light Super operations
5  *
6  *  Copyright (c) 2002-2005 Cluster File Systems, Inc.
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #define DEBUG_SUBSYSTEM S_LLITE
25
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/random.h>
29 #include <linux/version.h>
30
31 #include <lustre_lite.h>
32 #include <lustre_ha.h>
33 #include <lustre_dlm.h>
34 #include <lprocfs_status.h>
35 #include <lustre_disk.h>
36 #include <lustre_param.h>
37 #include <lustre_log.h>
38 #include "llite_internal.h"
39
40 cfs_mem_cache_t *ll_file_data_slab;
41
42 LIST_HEAD(ll_super_blocks);
43 spinlock_t ll_sb_lock = SPIN_LOCK_UNLOCKED;
44
45 extern struct address_space_operations ll_aops;
46 extern struct address_space_operations ll_dir_aops;
47
48 #ifndef log2
49 #define log2(n) ffz(~(n))
50 #endif
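
/*
 * Illustrative note (not part of the original source): for a power-of-two n,
 * ffz(~(n)) returns the index of the single set bit, i.e. log2(n).  For
 * example, with a 4096-byte block size, ~4096 has bit 12 as its lowest zero
 * bit, so log2(4096) = 12; this is how osfs.os_bsize is turned into
 * sb->s_blocksize_bits further below.
 */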
51
52
53 static struct ll_sb_info *ll_init_sbi(void)
54 {
55         struct ll_sb_info *sbi = NULL;
56         class_uuid_t uuid;
57         int i;
58         ENTRY;
59
60         OBD_ALLOC(sbi, sizeof(*sbi));
61         if (!sbi)
62                 RETURN(NULL);
63
64         spin_lock_init(&sbi->ll_lock);
65         spin_lock_init(&sbi->ll_lco.lco_lock);
66         spin_lock_init(&sbi->ll_pp_extent_lock);
67         spin_lock_init(&sbi->ll_process_lock);
68         sbi->ll_rw_stats_on = 0;
69         INIT_LIST_HEAD(&sbi->ll_pglist);
70         if (num_physpages >> (20 - CFS_PAGE_SHIFT) < 512)
71                 sbi->ll_async_page_max = num_physpages / 2;
72         else
73                 sbi->ll_async_page_max = (num_physpages / 4) * 3;
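        /*
         * Worked example (illustrative, assuming 4 KiB pages): num_physpages
         * right-shifted by (20 - CFS_PAGE_SHIFT) = 8 is the node's memory in
         * MiB.  On a 1 GiB client (num_physpages = 262144) that is 1024 >= 512,
         * so ll_async_page_max = (262144 / 4) * 3 = 196608 pages, i.e. up to
         * ~768 MiB of cached async pages.
         */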
74         sbi->ll_ra_info.ra_max_pages = min(num_physpages / 8,
75                                            SBI_DEFAULT_READAHEAD_MAX);
76         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
77                                            SBI_DEFAULT_READAHEAD_WHOLE_MAX;
78
79         INIT_LIST_HEAD(&sbi->ll_conn_chain);
80         INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
81
82         ll_generate_random_uuid(uuid);
83         class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
84         CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
85
86         spin_lock(&ll_sb_lock);
87         list_add_tail(&sbi->ll_list, &ll_super_blocks);
88         spin_unlock(&ll_sb_lock);
89
90 #ifdef ENABLE_LLITE_CHECKSUM
91         sbi->ll_flags |= LL_SBI_CHECKSUM;
92 #endif
93
94 #ifdef HAVE_LRU_RESIZE_SUPPORT
95         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
96 #endif
97
98 #ifdef HAVE_EXPORT___IGET
99         INIT_LIST_HEAD(&sbi->ll_deathrow);
100         spin_lock_init(&sbi->ll_deathrow_lock);
101 #endif
102         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
103                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_r_hist.oh_lock);
104                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].pp_w_hist.oh_lock);
105         }
106
107         RETURN(sbi);
108 }
109
110 void ll_free_sbi(struct super_block *sb)
111 {
112         struct ll_sb_info *sbi = ll_s2sbi(sb);
113         ENTRY;
114
115         if (sbi != NULL) {
116                 spin_lock(&ll_sb_lock);
117                 list_del(&sbi->ll_list);
118                 spin_unlock(&ll_sb_lock);
119                 OBD_FREE(sbi, sizeof(*sbi));
120         }
121         EXIT;
122 }
123
124 static struct dentry_operations ll_d_root_ops = {
125 #ifdef DCACHE_LUSTRE_INVALID
126         .d_compare = ll_dcompare,
127 #endif
128 };
129
130 /* Initialize the default and maximum LOV EA and cookie sizes.  This allows
131  * us to make MDS RPCs with large enough reply buffers to hold the
132  * maximum-sized (= maximum striped) EA and cookie without having to
133  * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */
134 static int ll_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
135 {
136         struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC };
137         __u32 valsize = sizeof(struct lov_desc);
138         int rc, easize, def_easize, cookiesize;
139         struct lov_desc desc;
140         __u32 stripes;
141         ENTRY;
142
143         rc = obd_get_info(dt_exp, strlen(KEY_LOVDESC) + 1, KEY_LOVDESC,
144                           &valsize, &desc);
145         if (rc)
146                 RETURN(rc);
147
148         stripes = min(desc.ld_tgt_count, (__u32)LOV_MAX_STRIPE_COUNT);
149         lsm.lsm_stripe_count = stripes;
150         easize = obd_size_diskmd(dt_exp, &lsm);
151
152         lsm.lsm_stripe_count = desc.ld_default_stripe_count;
153         def_easize = obd_size_diskmd(dt_exp, &lsm);
154
155         cookiesize = stripes * sizeof(struct llog_cookie);
156
157         CDEBUG(D_HA, "updating max_mdsize/max_cookiesize: %d/%d\n",
158                easize, cookiesize);
159
160         rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize);
161         RETURN(rc);
162 }
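
/*
 * Rough sketch of the sizing above (illustrative only, assuming the LOV v1
 * EA layout):
 *
 *   easize     ~= sizeof(struct lov_mds_md_v1) +
 *                 stripes * sizeof(struct lov_ost_data_v1)
 *   cookiesize  = stripes * sizeof(struct llog_cookie)
 *
 * A filesystem with many OSTs therefore needs correspondingly larger MDS
 * reply buffers, which is why the sizes are computed once at connect time
 * rather than on every RPC.
 */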
163
164 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
165 {
166         struct inode *root = NULL;
167         struct ll_sb_info *sbi = ll_s2sbi(sb);
168         struct obd_device *obd;
169         struct lu_fid rootfid;
170         struct obd_capa *oc = NULL;
171         struct obd_statfs osfs;
172         struct ptlrpc_request *request = NULL;
173         struct lustre_handle dt_conn = {0, };
174         struct lustre_handle md_conn = {0, };
175         struct obd_connect_data *data = NULL;
176         struct lustre_md lmd;
177         obd_valid valid;
178         int size, err, checksum;
179         ENTRY;
180
181         obd = class_name2obd(md);
182         if (!obd) {
183                 CERROR("MD %s: not setup or attached\n", md);
184                 RETURN(-EINVAL);
185         }
186
187         OBD_ALLOC_PTR(data);
188         if (data == NULL)
189                 RETURN(-ENOMEM);
190
191         if (proc_lustre_fs_root) {
192                 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
193                                                   dt, md);
194                 if (err < 0)
195                         CERROR("could not register mount in /proc/lustre\n");
196         }
197
198         /* indicate the features supported by this client */
199         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
200                                   OBD_CONNECT_JOIN     | OBD_CONNECT_ATTRFID  |
201                                   OBD_CONNECT_VERSION  | OBD_CONNECT_MDS_CAPA |
202                                   OBD_CONNECT_OSS_CAPA | OBD_CONNECT_CANCELSET|
203                                   OBD_CONNECT_FID;
204
205 #ifdef HAVE_LRU_RESIZE_SUPPORT
206         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
207                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
208 #endif
209 #ifdef CONFIG_FS_POSIX_ACL
210         data->ocd_connect_flags |= OBD_CONNECT_ACL;
211 #endif
212         data->ocd_ibits_known = MDS_INODELOCK_FULL;
213         data->ocd_version = LUSTRE_VERSION_CODE;
214
215         if (sb->s_flags & MS_RDONLY)
216                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
217         if (sbi->ll_flags & LL_SBI_USER_XATTR)
218                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
219
220 #ifdef HAVE_MS_FLOCK_LOCK
221         /* force vfs to use lustre handler for flock() calls - bug 10743 */
222         sb->s_flags |= MS_FLOCK_LOCK;
223 #endif
224         
225         if (sbi->ll_flags & LL_SBI_FLOCK)
226                 sbi->ll_fop = &ll_file_operations_flock;
227         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
228                 sbi->ll_fop = &ll_file_operations;
229         else
230                 sbi->ll_fop = &ll_file_operations_noflock;
231
232         /* real client */
233         data->ocd_connect_flags |= OBD_CONNECT_REAL;
234         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
235                 data->ocd_connect_flags &= ~OBD_CONNECT_LCL_CLIENT;
236                 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT;
237         } else {
238                 data->ocd_connect_flags &= ~OBD_CONNECT_RMT_CLIENT;
239                 data->ocd_connect_flags |= OBD_CONNECT_LCL_CLIENT;
240         }
241
242         err = obd_connect(NULL, &md_conn, obd, &sbi->ll_sb_uuid, data);
243         if (err == -EBUSY) {
244                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
245                                    "recovery, of which this client is not a "
246                                    "part. Please wait for recovery to complete,"
247                                    " abort, or time out.\n", md);
248                 GOTO(out, err);
249         } else if (err) {
250                 CERROR("cannot connect to %s: rc = %d\n", md, err);
251                 GOTO(out, err);
252         }
253         sbi->ll_md_exp = class_conn2export(&md_conn);
254
255         err = obd_fid_init(sbi->ll_md_exp);
256         if (err) {
257                 CERROR("Can't init metadata layer FID infrastructure, "
258                        "rc %d\n", err);
259                 GOTO(out_md, err);
260         }
261
262         err = obd_statfs(obd, &osfs, cfs_time_current_64() - HZ);
263         if (err)
264                 GOTO(out_md_fid, err);
265
266         size = sizeof(*data);
267         err = obd_get_info(sbi->ll_md_exp, strlen(KEY_CONN_DATA),
268                            KEY_CONN_DATA,  &size, data);
269         if (err) {
270                 CERROR("Get connect data failed: %d \n", err);
271                 GOTO(out_md, err);
272         }
273
274         LASSERT(osfs.os_bsize);
275         sb->s_blocksize = osfs.os_bsize;
276         sb->s_blocksize_bits = log2(osfs.os_bsize);
277         sb->s_magic = LL_SUPER_MAGIC;
278
279         /* for bug 11559. in $LINUX/fs/read_write.c, function do_sendfile():
280          *         retval = in_file->f_op->sendfile(...);
281          *         if (*ppos > max)
282          *                 retval = -EOVERFLOW;
283          *
284          * it checks whether *ppos is greater than max.  However, max equals
285          * s_maxbytes, which would be a negative value on an x86_64 box since
286          * loff_t is defined as a signed long long integer in the Linux kernel. */
287 #if BITS_PER_LONG == 64
288         sb->s_maxbytes = PAGE_CACHE_MAXBYTES >> 1;
289 #else
290         sb->s_maxbytes = PAGE_CACHE_MAXBYTES;
291 #endif
292         sbi->ll_namelen = osfs.os_namelen;
293         sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
294
295         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
296             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
297                 LCONSOLE_INFO("Disabling user_xattr feature because "
298                               "it is not supported on the server\n");
299                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
300         }
301
302         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
303 #ifdef MS_POSIXACL
304                 sb->s_flags |= MS_POSIXACL;
305 #endif
306                 sbi->ll_flags |= LL_SBI_ACL;
307         } else {
308                 LCONSOLE_INFO("client wants to enable acl, but the mdt does not!\n");
309 #ifdef MS_POSIXACL
310                 sb->s_flags &= ~MS_POSIXACL;
311 #endif
312                 sbi->ll_flags &= ~LL_SBI_ACL;
313         }
314
315         if (data->ocd_connect_flags & OBD_CONNECT_JOIN)
316                 sbi->ll_flags |= LL_SBI_JOIN;
317
318         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
319                 if (!(data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT)) {
320                         /* sometimes a local client claims to be remote, but the
321                          * mdt will disagree when client gss is not applied. */
322                         LCONSOLE_INFO("client claims to be remote, but the server "
323                                       "rejected this; forced to be local.\n");
324                         sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
325                 }
326         } else {
327                 if (!(data->ocd_connect_flags & OBD_CONNECT_LCL_CLIENT)) {
328                         /* with gss applied, a remote client cannot claim to be
329                          * local, so the mdt may force the client to be remote. */
330                         LCONSOLE_INFO("client claims to be local, but the server "
331                                       "rejected this; forced to be remote.\n");
332                         sbi->ll_flags |= LL_SBI_RMT_CLIENT;
333                 }
334         }
335
336         if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
337                 LCONSOLE_INFO("client enabled MDS capability!\n");
338                 sbi->ll_flags |= LL_SBI_MDS_CAPA;
339         }
340
341         if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
342                 LCONSOLE_INFO("client enabled OSS capability!\n");
343                 sbi->ll_flags |= LL_SBI_OSS_CAPA;
344         }
345
346         sbi->ll_sdev_orig = sb->s_dev;
347 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
348         /* We set sb->s_dev equal on all lustre clients in order to support
349          * NFS export clustering.  NFSD requires that the FSID be the same
350          * on all clients. */
351         /* s_dev is also used in lt_compare() to compare two fs, but that is
352          * only a node-local comparison. */
353
354         /* XXX: this will not work with LMV */
355         sb->s_dev = get_uuid2int(sbi2mdc(sbi)->cl_target_uuid.uuid,
356                                  strlen(sbi2mdc(sbi)->cl_target_uuid.uuid));
357 #endif
358
359         obd = class_name2obd(dt);
360         if (!obd) {
361                 CERROR("DT %s: not setup or attached\n", dt);
362                 GOTO(out_md_fid, err = -ENODEV);
363         }
364
365         data->ocd_connect_flags = OBD_CONNECT_GRANT     | OBD_CONNECT_VERSION  |
366                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
367                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID;
368         if (sbi->ll_flags & LL_SBI_OSS_CAPA)
369                 data->ocd_connect_flags |= OBD_CONNECT_OSS_CAPA;
370
371         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
372                 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
373                  * disabled by default, because it can still be enabled on the
374                  * fly via /proc. As a consequence, we still need to come to an
375                  * agreement on the supported algorithms at connect time */
376                 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
377
378                 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
379                         data->ocd_cksum_types = OBD_CKSUM_ADLER;
380                 else
381                         /* send the list of supported checksum types */
382                         data->ocd_cksum_types = OBD_CKSUM_ALL;
383         }
384
385 #ifdef HAVE_LRU_RESIZE_SUPPORT
386         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
387 #endif
388         CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d "
389                "ocd_grant: %d\n", data->ocd_connect_flags,
390                data->ocd_version, data->ocd_grant);
391
392         obd->obd_upcall.onu_owner = &sbi->ll_lco;
393         obd->obd_upcall.onu_upcall = ll_ocd_update;
394         data->ocd_brw_size = PTLRPC_MAX_BRW_PAGES << CFS_PAGE_SHIFT;
395
396         err = obd_connect(NULL, &dt_conn, obd, &sbi->ll_sb_uuid, data);
397         if (err == -EBUSY) {
398                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
399                                    "recovery, of which this client is not a "
400                                    "part.  Please wait for recovery to "
401                                    "complete, abort, or time out.\n", dt);
402                 GOTO(out_md_fid, err);
403         } else if (err) {
404                 CERROR("Cannot connect to %s: rc = %d\n", dt, err);
405                 GOTO(out_md_fid, err);
406         }
407
408         sbi->ll_dt_exp = class_conn2export(&dt_conn);
409
410         err = obd_fid_init(sbi->ll_dt_exp);
411         if (err) {
412                 CERROR("Can't init data layer FID infrastructure, "
413                        "rc %d\n", err);
414                 GOTO(out_dt, err);
415         }
416         
417         spin_lock(&sbi->ll_lco.lco_lock);
418         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
419         spin_unlock(&sbi->ll_lco.lco_lock);
420
421         ll_init_ea_size(sbi->ll_md_exp, sbi->ll_dt_exp);
422
423         err = obd_prep_async_page(sbi->ll_dt_exp, NULL, NULL, NULL,
424                                   0, NULL, NULL, NULL);
425         if (err < 0) {
426                 LCONSOLE_ERROR_MSG(0x151, "There are no OSTs in this "
427                                    "filesystem. There must be at least one "
428                                    "active OST for a client to start.\n");
429                 GOTO(out_dt_fid, err);
430         }
431
432         if (!ll_async_page_slab) {
433                 ll_async_page_slab_size =
434                         size_round(sizeof(struct ll_async_page)) + err;
435                 ll_async_page_slab = cfs_mem_cache_create("ll_async_page",
436                                                           ll_async_page_slab_size,
437                                                           0, 0);
438                 if (!ll_async_page_slab)
439                         GOTO(out_dt_fid, err = -ENOMEM);
440         }
441
442         err = md_getstatus(sbi->ll_md_exp, &rootfid, &oc);
443         if (err) {
444                 CERROR("md_getstatus failed: rc = %d\n", err);
445                 GOTO(out_dt_fid, err);
446         }
447         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&rootfid));
448         sbi->ll_root_fid = rootfid;
449
450         sb->s_op = &lustre_super_operations;
451         sb->s_export_op = &lustre_export_operations;
452
453         /* make root inode
454          * XXX: move this to after cbd setup? */
455         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
456         if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
457                 valid |= OBD_MD_FLRMTPERM;
458         else if (sbi->ll_flags & LL_SBI_ACL)
459                 valid |= OBD_MD_FLACL;
460
461         err = md_getattr(sbi->ll_md_exp, &rootfid, oc, valid, 0, &request);
462         if (oc)
463                 free_capa(oc);
464         if (err) {
465                 CERROR("md_getattr failed for root: rc = %d\n", err);
466                 GOTO(out_dt_fid, err);
467         }
468         memset(&lmd, 0, sizeof(lmd));
469         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
470                                sbi->ll_md_exp, &lmd);
471         if (err) {
472                 CERROR("failed to understand root inode md: rc = %d\n", err);
473                 ptlrpc_req_finished(request);
474                 GOTO(out_dt_fid, err);
475         }
476
477         LASSERT(fid_is_sane(&sbi->ll_root_fid));
478         root = ll_iget(sb, ll_fid_build_ino(sbi, &sbi->ll_root_fid), &lmd);
479         md_free_lustre_md(sbi->ll_md_exp, &lmd);
480         ptlrpc_req_finished(request);
481
482         if (root == NULL || is_bad_inode(root)) {
483                 if (lmd.lsm)
484                         obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
485 #ifdef CONFIG_FS_POSIX_ACL
486                 if (lmd.posix_acl) {
487                         posix_acl_release(lmd.posix_acl);
488                         lmd.posix_acl = NULL;
489                 }
490 #endif
491                 CERROR("lustre_lite: bad iget4 for root\n");
492                 GOTO(out_root, err = -EBADF);
493         }
494
495         err = ll_close_thread_start(&sbi->ll_lcq);
496         if (err) {
497                 CERROR("cannot start close thread: rc %d\n", err);
498                 GOTO(out_root, err);
499         }
500
501 #ifdef CONFIG_FS_POSIX_ACL
502         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
503                 rct_init(&sbi->ll_rct);
504                 et_init(&sbi->ll_et);
505         }
506 #endif
507
508         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
509         err = obd_set_info_async(sbi->ll_dt_exp, strlen("checksum"),"checksum",
510                                  sizeof(checksum), &checksum, NULL);
511
512         sb->s_root = d_alloc_root(root);
        /* d_alloc_root() can fail under memory pressure; bail out cleanly
         * instead of dereferencing a NULL sb->s_root below. */
        if (sb->s_root == NULL)
                GOTO(out_root, err = -ENOMEM);
513         if (data != NULL)
514                 OBD_FREE(data, sizeof(*data));
515         sb->s_root->d_op = &ll_d_root_ops;
516         RETURN(err);
517 out_root:
518         if (root)
519                 iput(root);
520 out_dt_fid:
521         obd_fid_fini(sbi->ll_dt_exp);
522 out_dt:
523         obd_disconnect(sbi->ll_dt_exp);
524         sbi->ll_dt_exp = NULL;
525 out_md_fid:
526         obd_fid_fini(sbi->ll_md_exp);
527 out_md:
528         obd_disconnect(sbi->ll_md_exp);
529         sbi->ll_md_exp = NULL;
530 out:
531         if (data != NULL)
532                 OBD_FREE_PTR(data);
533         lprocfs_unregister_mountpoint(sbi);
534         return err;
535 }
536
537 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
538 {
539         int size, rc;
540
541         *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
542         size = sizeof(int);
543         rc = obd_get_info(sbi->ll_md_exp, strlen("max_easize"), "max_easize",
544                           &size, lmmsize);
545         if (rc)
546                 CERROR("Get max mdsize error: rc = %d\n", rc);
547
548         RETURN(rc);
549 }
550
551 void ll_dump_inode(struct inode *inode)
552 {
553         struct list_head *tmp;
554         int dentry_count = 0;
555
556         LASSERT(inode != NULL);
557
558         list_for_each(tmp, &inode->i_dentry)
559                 dentry_count++;
560
561         CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
562                inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
563                inode->i_mode, atomic_read(&inode->i_count), dentry_count);
564 }
565
566 void lustre_dump_dentry(struct dentry *dentry, int recur)
567 {
568         struct list_head *tmp;
569         int subdirs = 0;
570
571         LASSERT(dentry != NULL);
572
573         list_for_each(tmp, &dentry->d_subdirs)
574                 subdirs++;
575
576         CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
577                " flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
578                dentry->d_name.len, dentry->d_name.name,
579                dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
580                dentry->d_parent, dentry->d_inode, atomic_read(&dentry->d_count),
581                dentry->d_flags, dentry->d_fsdata, subdirs);
582         if (dentry->d_inode != NULL)
583                 ll_dump_inode(dentry->d_inode);
584
585         if (recur == 0)
586                 return;
587
588         list_for_each(tmp, &dentry->d_subdirs) {
589                 struct dentry *d = list_entry(tmp, struct dentry, d_child);
590                 lustre_dump_dentry(d, recur - 1);
591         }
592 }
593
594 #ifdef HAVE_EXPORT___IGET
595 static void prune_dir_dentries(struct inode *inode)
596 {
597         struct dentry *dentry, *prev = NULL;
598
599         /* due to lustre-specific logic, a directory can have
600          * more than one dentry - a bug from the VFS POV */
601 restart:
602         spin_lock(&dcache_lock);
603         if (!list_empty(&inode->i_dentry)) {
604                 dentry = list_entry(inode->i_dentry.prev,
605                                     struct dentry, d_alias);
606                 /* in order to prevent an infinite loop, we break
607                  * out if the previous dentry is still busy */
608                 if (dentry != prev) {
609                         prev = dentry;
610                         dget_locked(dentry);
611                         spin_unlock(&dcache_lock);
612
613                         /* try to kill all child dentries */
614                         lock_dentry(dentry);
615                         shrink_dcache_parent(dentry);
616                         unlock_dentry(dentry);
617                         dput(dentry);
618
619                         /* now try to get rid of current dentry */
620                         d_prune_aliases(inode);
621                         goto restart;
622                 }
623         }
624         spin_unlock(&dcache_lock);
625 }
626
627 static void prune_deathrow_one(struct ll_inode_info *lli)
628 {
629         struct inode *inode = ll_info2i(lli);
630
631         /* first, try to drop any dentries - they hold a ref on the inode */
632         if (S_ISDIR(inode->i_mode))
633                 prune_dir_dentries(inode);
634         else
635                 d_prune_aliases(inode);
636
637
638         /* if somebody still uses it, leave it */
639         LASSERT(atomic_read(&inode->i_count) > 0);
640         if (atomic_read(&inode->i_count) > 1)
641                 goto out;
642
643         CDEBUG(D_INODE, "inode %lu/%u(%d) looks like a good candidate for pruning\n",
644                inode->i_ino,inode->i_generation, atomic_read(&inode->i_count));
645
646         /* seems nobody uses it anymore */
647         inode->i_nlink = 0;
648
649 out:
650         iput(inode);
651         return;
652 }
653
654 static void prune_deathrow(struct ll_sb_info *sbi, int try)
655 {
656         struct ll_inode_info *lli;
657         int empty;
658
659         do {
660                 if (need_resched() && try)
661                         break;
662
663                 if (try) {
664                         if (!spin_trylock(&sbi->ll_deathrow_lock))
665                                 break;
666                 } else {
667                         spin_lock(&sbi->ll_deathrow_lock);
668                 }
669
670                 empty = 1;
671                 lli = NULL;
672                 if (!list_empty(&sbi->ll_deathrow)) {
673                         lli = list_entry(sbi->ll_deathrow.next,
674                                          struct ll_inode_info,
675                                          lli_dead_list);
676                         list_del_init(&lli->lli_dead_list);
677                         if (!list_empty(&sbi->ll_deathrow))
678                                 empty = 0;
679                 }
680                 spin_unlock(&sbi->ll_deathrow_lock);
681
682                 if (lli)
683                         prune_deathrow_one(lli);
684
685         } while (empty == 0);
686 }
687 #else /* !HAVE_EXPORT___IGET */
688 #define prune_deathrow(sbi, try) do {} while (0)
689 #endif /* HAVE_EXPORT___IGET */
690
691 void client_common_put_super(struct super_block *sb)
692 {
693         struct ll_sb_info *sbi = ll_s2sbi(sb);
694         ENTRY;
695
696 #ifdef CONFIG_FS_POSIX_ACL
697         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
698                 et_fini(&sbi->ll_et);
699                 rct_fini(&sbi->ll_rct);
700         }
701 #endif
702
703         obd_cancel_unused(sbi->ll_dt_exp, NULL, 0, NULL);
704
705         ll_close_thread_shutdown(sbi->ll_lcq);
706
707         /* destroy inodes in deathrow */
708         prune_deathrow(sbi, 0);
709
710         list_del(&sbi->ll_conn_chain);
711
712         obd_fid_fini(sbi->ll_dt_exp);
713         obd_disconnect(sbi->ll_dt_exp);
714         sbi->ll_dt_exp = NULL;
715
716         lprocfs_unregister_mountpoint(sbi);
717
718         obd_fid_fini(sbi->ll_md_exp);
719         obd_disconnect(sbi->ll_md_exp);
720         sbi->ll_md_exp = NULL;
721
722         EXIT;
723 }
724
725 void ll_kill_super(struct super_block *sb)
726 {
727         struct ll_sb_info *sbi;
728
729         ENTRY;
730
731         /* superblock not initialized yet? */
732         if (!(sb->s_flags & MS_ACTIVE))
733                 return;
734
735         sbi = ll_s2sbi(sb);
736         /* we need to restore s_dev, which was changed for clustered NFS, before
737          * put_super because newer kernels cache s_dev, and changing sb->s_dev in
738          * put_super does not affect the real device removal */
739         if (sbi)
740                 sb->s_dev = sbi->ll_sdev_orig;
741         EXIT;
742 }
743
744 char *ll_read_opt(const char *opt, char *data)
745 {
746         char *value;
747         char *retval;
748         ENTRY;
749
750         CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
751         if (strncmp(opt, data, strlen(opt)))
752                 RETURN(NULL);
753         if ((value = strchr(data, '=')) == NULL)
754                 RETURN(NULL);
755
756         value++;
757         OBD_ALLOC(retval, strlen(value) + 1);
758         if (!retval) {
759                 CERROR("out of memory!\n");
760                 RETURN(NULL);
761         }
762
763         memcpy(retval, value, strlen(value)+1);
764         CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
765         RETURN(retval);
766 }
767
768 static inline int ll_set_opt(const char *opt, char *data, int fl)
769 {
770         if (strncmp(opt, data, strlen(opt)) != 0)
771                 return(0);
772         else
773                 return(fl);
774 }
775
776 /* non-client-specific mount options are parsed in lmd_parse */
777 static int ll_options(char *options, int *flags)
778 {
779         int tmp;
780         char *s1 = options, *s2;
781         ENTRY;
782
783         if (!options) 
784                 RETURN(0);
785
786         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
787
788         while (*s1) {
789                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
790                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
791                 if (tmp) {
792                         *flags |= tmp;
793                         goto next;
794                 }
795                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
796                 if (tmp) {
797                         *flags |= tmp;
798                         goto next;
799                 }
800                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
801                 if (tmp) {
802                         *flags |= tmp;
803                         goto next;
804                 }
805                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
806                 if (tmp) {
807                         *flags &= ~tmp;
808                         goto next;
809                 }
810                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
811                 if (tmp) {
812                         *flags |= tmp;
813                         goto next;
814                 }
815                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
816                 if (tmp) {
817                         *flags &= ~tmp;
818                         goto next;
819                 }
820                 tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
821                 if (tmp) {
822                         /* Ignore deprecated mount option.  The client will
823                          * always try to mount with ACL support; whether it
824                          * is used depends on whether the server supports it. */
825                         goto next;
826                 }
827                 tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
828                 if (tmp) {
829                         goto next;
830                 }
831                 tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
832                 if (tmp) {
833                         *flags |= tmp;
834                         goto next;
835                 }
836
837                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
838                 if (tmp) {
839                         *flags |= tmp;
840                         goto next;
841                 }
842                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
843                 if (tmp) {
844                         *flags &= ~tmp;
845                         goto next;
846                 }
847                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
848                 if (tmp) {
849                         *flags |= tmp;
850                         goto next;
851                 }
852                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
853                 if (tmp) {
854                         *flags &= ~tmp;
855                         goto next;
856                 }
857
858                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
859                                    s1);
860                 RETURN(-EINVAL);
861
862 next:
863                 /* Find next opt */
864                 s2 = strchr(s1, ',');
865                 if (s2 == NULL)
866                         break;
867                 s1 = s2 + 1;
868         }
869         RETURN(0);
870 }
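
/*
 * Illustrative example (not part of the original source): a client mount like
 *
 *   mount -t lustre -o flock,user_xattr,nochecksum <mgsnid>:/<fsname> /mnt/lustre
 *
 * reaches ll_options() with options = "flock,user_xattr,nochecksum", which
 * sets LL_SBI_FLOCK and LL_SBI_USER_XATTR and clears LL_SBI_CHECKSUM in
 * *flags; the non-client-specific options were already consumed by
 * lmd_parse() before this point.
 */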
871
872 void ll_lli_init(struct ll_inode_info *lli)
873 {
874         lli->lli_inode_magic = LLI_INODE_MAGIC;
875         sema_init(&lli->lli_size_sem, 1);
876         sema_init(&lli->lli_write_sem, 1);
877         lli->lli_flags = 0;
878         lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
879         spin_lock_init(&lli->lli_lock);
880         INIT_LIST_HEAD(&lli->lli_pending_write_llaps);
881         INIT_LIST_HEAD(&lli->lli_close_list);
882         lli->lli_inode_magic = LLI_INODE_MAGIC;
883         sema_init(&lli->lli_och_sem, 1);
884         lli->lli_mds_read_och = lli->lli_mds_write_och = NULL;
885         lli->lli_mds_exec_och = NULL;
886         lli->lli_open_fd_read_count = lli->lli_open_fd_write_count = 0;
887         lli->lli_open_fd_exec_count = 0;
888         INIT_LIST_HEAD(&lli->lli_dead_list);
889         lli->lli_remote_perms = NULL;
890         lli->lli_rmtperm_utime = 0;
891         sema_init(&lli->lli_rmtperm_sem, 1);
892         INIT_LIST_HEAD(&lli->lli_oss_capas);
893 }
894
895 int ll_fill_super(struct super_block *sb)
896 {
897         struct lustre_profile *lprof;
898         struct lustre_sb_info *lsi = s2lsi(sb);
899         struct ll_sb_info *sbi;
900         char  *dt = NULL, *md = NULL;
901         char  *profilenm = get_profile_name(sb);
902         struct config_llog_instance cfg = {0, };
903         char   ll_instance[sizeof(sb) * 2 + 1];
904         int    err;
905         ENTRY;
906
907         CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
908
909         cfs_module_get();
910
911         sb->s_type->fs_flags |= FS_RENAME_DOES_D_MOVE;
912         /* client additional sb info */
913         lsi->lsi_llsbi = sbi = ll_init_sbi();
914         if (!sbi) {
915                 cfs_module_put();
916                 RETURN(-ENOMEM);
917         }
918
919         err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
920         if (err) 
921                 GOTO(out_free, err);
922
923         /* Generate a string unique to this super, in case some joker tries
924            to mount the same fs at two mount points.
925            Use the address of the super itself.*/
926         sprintf(ll_instance, "%p", sb);
927         cfg.cfg_instance = ll_instance;
928         cfg.cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
929
930         /* set up client obds */
931         err = lustre_process_log(sb, profilenm, &cfg);
932         if (err < 0) {
933                 CERROR("Unable to process log: %d\n", err);
934                 GOTO(out_free, err);
935         }
936
937         lprof = class_get_profile(profilenm);
938         if (lprof == NULL) {
939                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
940                                    " read from the MGS.  Does that filesystem "
941                                    "exist?\n", profilenm);
942                 GOTO(out_free, err = -EINVAL);
943         }
944         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
945                lprof->lp_md, lprof->lp_dt);
946
947         OBD_ALLOC(dt, strlen(lprof->lp_dt) +
948                   strlen(ll_instance) + 2);
949         if (!dt)
950                 GOTO(out_free, err = -ENOMEM);
951         sprintf(dt, "%s-%s", lprof->lp_dt, ll_instance);
952
953         OBD_ALLOC(md, strlen(lprof->lp_md) +
954                   strlen(ll_instance) + 2);
955         if (!md)
956                 GOTO(out_free, err = -ENOMEM);
957         sprintf(md, "%s-%s", lprof->lp_md, ll_instance);
958
959         /* connections, registrations, sb setup */
960         err = client_common_fill_super(sb, md, dt);
961
962 out_free:
963         if (md)
964                 OBD_FREE(md, strlen(md) + 1);
965         if (dt)
966                 OBD_FREE(dt, strlen(dt) + 1);
967         if (err) 
968                 ll_put_super(sb);
969         else
970                 LCONSOLE_WARN("Client %s has started\n", profilenm);        
971
972         RETURN(err);
973 } /* ll_fill_super */
974
975
976 void ll_put_super(struct super_block *sb)
977 {
978         struct config_llog_instance cfg;
979         char   ll_instance[sizeof(sb) * 2 + 1];
980         struct obd_device *obd;
981         struct lustre_sb_info *lsi = s2lsi(sb);
982         struct ll_sb_info *sbi = ll_s2sbi(sb);
983         char *profilenm = get_profile_name(sb);
984         int force = 1, next;
985         ENTRY;
986
987         CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
988
989         ll_print_capa_stat(sbi);
990
991         sprintf(ll_instance, "%p", sb);
992         cfg.cfg_instance = ll_instance;
993         lustre_end_log(sb, NULL, &cfg);
994         
995         if (sbi->ll_md_exp) {
996                 obd = class_exp2obd(sbi->ll_md_exp);
997                 if (obd) 
998                         force = obd->obd_force;
999         }
1000         
1001         /* We need to set force before the lov_disconnect in
1002            lustre_common_put_super, since lov_disconnect cleans up the OSCs as well. */
1003         if (force) {
1004                 next = 0;
1005                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1006                                                      &next)) != NULL) {
1007                         obd->obd_force = force;
1008                 }
1009         }                       
1010
1011         if (sbi->ll_lcq) {
1012                 /* Only if client_common_fill_super succeeded */
1013                 client_common_put_super(sb);
1014         }
1015         next = 0;
1016         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) != NULL) {
1017                 class_manual_cleanup(obd);
1018         }
1019
1020         if (profilenm)
1021                 class_del_profile(profilenm);
1022
1023         ll_free_sbi(sb);
1024         lsi->lsi_llsbi = NULL;
1025
1026         lustre_common_put_super(sb);
1027
1028         LCONSOLE_WARN("client %s umount complete\n", ll_instance);
1029         
1030         cfs_module_put();
1031
1032         EXIT;
1033 } /* ll_put_super */
1034
1035 #ifdef HAVE_REGISTER_CACHE
1036 #include <linux/cache_def.h>
1037 #ifdef HAVE_CACHE_RETURN_INT
1038 static int
1039 #else
1040 static void
1041 #endif
1042 ll_shrink_cache(int priority, unsigned int gfp_mask)
1043 {
1044         struct ll_sb_info *sbi;
1045         int count = 0;
1046
1047         list_for_each_entry(sbi, &ll_super_blocks, ll_list)
1048                 count += llap_shrink_cache(sbi, priority);
1049
1050 #ifdef HAVE_CACHE_RETURN_INT
1051         return count;
1052 #endif
1053 }
1054
1055 struct cache_definition ll_cache_definition = {
1056         .name = "llap_cache",
1057         .shrink = ll_shrink_cache
1058 };
1059 #endif /* HAVE_REGISTER_CACHE */
1060
1061 struct inode *ll_inode_from_lock(struct ldlm_lock *lock)
1062 {
1063         struct inode *inode = NULL;
1064         /* NOTE: we depend on atomic igrab() -bzzz */
1065         lock_res_and_lock(lock);
1066         if (lock->l_ast_data) {
1067                 struct ll_inode_info *lli = ll_i2info(lock->l_ast_data);
1068                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1069                         inode = igrab(lock->l_ast_data);
1070                 } else {
1071                         inode = lock->l_ast_data;
1072                         ldlm_lock_debug(NULL, inode->i_state & I_FREEING ?
1073                                                 D_INFO : D_WARNING,
1074                                         lock, __FILE__, __func__, __LINE__,
1075                                         "l_ast_data %p is bogus: magic %08x",
1076                                         lock->l_ast_data, lli->lli_inode_magic);
1077                         inode = NULL;
1078                 }
1079         }
1080         unlock_res_and_lock(lock);
1081         return inode;
1082 }
1083
1084 static int null_if_equal(struct ldlm_lock *lock, void *data)
1085 {
1086         if (data == lock->l_ast_data) {
1087                 lock->l_ast_data = NULL;
1088
1089                 if (lock->l_req_mode != lock->l_granted_mode)
1090                         LDLM_ERROR(lock, "clearing inode with ungranted lock");
1091         }
1092
1093         return LDLM_ITER_CONTINUE;
1094 }
1095
1096 void ll_clear_inode(struct inode *inode)
1097 {
1098         struct ll_inode_info *lli = ll_i2info(inode);
1099         struct ll_sb_info *sbi = ll_i2sbi(inode);
1100         ENTRY;
1101
1102         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1103                inode->i_generation, inode);
1104
1105         ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1106         md_change_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
1107                          null_if_equal, inode);
1108
1109         LASSERT(!lli->lli_open_fd_write_count);
1110         LASSERT(!lli->lli_open_fd_read_count);
1111         LASSERT(!lli->lli_open_fd_exec_count);
1112
1113         if (lli->lli_mds_write_och)
1114                 ll_md_real_close(inode, FMODE_WRITE);
1115         if (lli->lli_mds_exec_och)
1116                 ll_md_real_close(inode, FMODE_EXEC);
1117         if (lli->lli_mds_read_och)
1118                 ll_md_real_close(inode, FMODE_READ);
1119
1120         if (lli->lli_smd) {
1121                 obd_change_cbdata(sbi->ll_dt_exp, lli->lli_smd,
1122                                   null_if_equal, inode);
1123
1124                 obd_free_memmd(sbi->ll_dt_exp, &lli->lli_smd);
1125                 lli->lli_smd = NULL;
1126         }
1127
1128         if (lli->lli_symlink_name) {
1129                 OBD_FREE(lli->lli_symlink_name,
1130                          strlen(lli->lli_symlink_name) + 1);
1131                 lli->lli_symlink_name = NULL;
1132         }
1133
1134         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1135                 LASSERT(lli->lli_posix_acl == NULL);
1136                 if (lli->lli_remote_perms) {
1137                         free_rmtperm_hash(lli->lli_remote_perms);
1138                         lli->lli_remote_perms = NULL;
1139                 }
1140         }
1141 #ifdef CONFIG_FS_POSIX_ACL
1142         else if (lli->lli_posix_acl) {
1143                 LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
1144                 LASSERT(lli->lli_remote_perms == NULL);
1145                 posix_acl_release(lli->lli_posix_acl);
1146                 lli->lli_posix_acl = NULL;
1147         }
1148 #endif
1149         lli->lli_inode_magic = LLI_INODE_DEAD;
1150
1151 #ifdef HAVE_EXPORT___IGET
1152         spin_lock(&sbi->ll_deathrow_lock);
1153         list_del_init(&lli->lli_dead_list);
1154         spin_unlock(&sbi->ll_deathrow_lock);
1155 #endif
1156         ll_clear_inode_capas(inode);
1157
1158         EXIT;
1159 }
1160
1161 int ll_md_setattr(struct inode *inode, struct md_op_data *op_data,
1162                   struct md_open_data **mod)
1163 {
1164         struct lustre_md md;
1165         struct ll_sb_info *sbi = ll_i2sbi(inode);
1166         struct ptlrpc_request *request = NULL;
1167         int rc;
1168         ENTRY;
1169         
1170         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0, 
1171                                      LUSTRE_OPC_ANY, NULL);
1172         if (IS_ERR(op_data))
1173                 RETURN(PTR_ERR(op_data));
1174
1175         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0, 
1176                         &request, mod);
1177         if (rc) {
1178                 ptlrpc_req_finished(request);
1179                 if (rc == -ENOENT) {
1180                         inode->i_nlink = 0;
1181                         /* Unlinked special device node?  Or just a race?
1182                          * Pretend we have done everything. */
1183                         if (!S_ISREG(inode->i_mode) &&
1184                             !S_ISDIR(inode->i_mode))
1185                                 rc = inode_setattr(inode, &op_data->op_attr);
1186                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1187                         CERROR("md_setattr fails: rc = %d\n", rc);
1188                 }
1189                 RETURN(rc);
1190         }
1191
1192         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1193                               sbi->ll_md_exp, &md);
1194         if (rc) {
1195                 ptlrpc_req_finished(request);
1196                 RETURN(rc);
1197         }
1198
1199         /* We call inode_setattr to adjust timestamps.
1200          * If there is at least some data in the file, we cleared ATTR_SIZE
1201          * above to avoid invoking vmtruncate; otherwise it is important
1202          * to call vmtruncate in inode_setattr to update inode->i_size
1203          * (bug 6196) */
1204         rc = inode_setattr(inode, &op_data->op_attr);
1205
1206         /* Extract epoch data if obtained. */
1207         op_data->op_handle = md.body->handle;
1208         op_data->op_ioepoch = md.body->ioepoch;
1209
1210         ll_update_inode(inode, &md);
1211         ptlrpc_req_finished(request);
1212
1213         RETURN(rc);
1214 }
1215
1216 /* Close IO epoch and send Size-on-MDS attribute update. */
1217 static int ll_setattr_done_writing(struct inode *inode,
1218                                    struct md_op_data *op_data,
1219                                    struct md_open_data *mod)
1220 {
1221         struct ll_inode_info *lli = ll_i2info(inode);
1222         int rc = 0;
1223         ENTRY;
1224         
1225         LASSERT(op_data != NULL);
1226         if (!S_ISREG(inode->i_mode))
1227                 RETURN(0);
1228
1229         CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID" for truncate\n",
1230                op_data->op_ioepoch, PFID(&lli->lli_fid));
1231
1232         op_data->op_flags = MF_EPOCH_CLOSE | MF_SOM_CHANGE;
1233         rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1234         if (rc == -EAGAIN) {
1235                 /* The MDS has instructed us to obtain the Size-on-MDS attribute
1236                  * from the OSTs and send a setattr back to the MDS. */
1237                 rc = ll_sizeonmds_update(inode, mod, &op_data->op_handle,
1238                                          op_data->op_ioepoch);
1239         } else if (rc) {
1240                 CERROR("inode %lu mdc truncate failed: rc = %d\n",
1241                        inode->i_ino, rc);
1242         }
1243         RETURN(rc);
1244 }
1245
1246 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1247  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1248  * keep these values until such a time that objects are allocated for it.
1249  * We do the MDS operations first, since the MDS checks permissions for us.
1250  * We don't do the MDS RPC if there is nothing that we want to store there,
1251  * otherwise there is no harm in updating mtime/atime on the MDS if we are
1252  * going to do an RPC anyways.
1253  *
1254  * If we are doing a truncate, we will send the mtime and ctime updates
1255  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1256  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1257  * at the same time.
1258  */
1259 int ll_setattr_raw(struct inode *inode, struct iattr *attr)
1260 {
1261         struct ll_inode_info *lli = ll_i2info(inode);
1262         struct lov_stripe_md *lsm = lli->lli_smd;
1263         struct ll_sb_info *sbi = ll_i2sbi(inode);
1264         struct md_op_data *op_data = NULL;
1265         struct md_open_data *mod = NULL;
1266         int ia_valid = attr->ia_valid;
1267         int rc = 0, rc1 = 0;
1268         ENTRY;
1269
1270         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu valid %x\n", inode->i_ino,
1271                attr->ia_valid);
1272         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETATTR, 1);
1273
1274         if (ia_valid & ATTR_SIZE) {
1275                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1276                         CDEBUG(D_INODE, "file too large %llu > "LPU64"\n",
1277                                attr->ia_size, ll_file_maxbytes(inode));
1278                         RETURN(-EFBIG);
1279                 }
1280
1281                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1282         }
1283
1284         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1285         if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
1286                 if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
1287                         RETURN(-EPERM);
1288         }
1289
1290         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1291         if (attr->ia_valid & ATTR_CTIME) {
1292                 attr->ia_ctime = CURRENT_TIME;
1293                 attr->ia_valid |= ATTR_CTIME_SET;
1294         }
1295         if (!(ia_valid & ATTR_ATIME_SET) && (attr->ia_valid & ATTR_ATIME)) {
1296                 attr->ia_atime = CURRENT_TIME;
1297                 attr->ia_valid |= ATTR_ATIME_SET;
1298         }
1299         if (!(ia_valid & ATTR_MTIME_SET) && (attr->ia_valid & ATTR_MTIME)) {
1300                 attr->ia_mtime = CURRENT_TIME;
1301                 attr->ia_valid |= ATTR_MTIME_SET;
1302         }
1303         if ((attr->ia_valid & ATTR_CTIME) && !(attr->ia_valid & ATTR_MTIME)) {
1304                 /* To avoid a stale mtime on the MDS, obtain it from the OST
1305                    and send it to the MDS. */
1306                 rc = ll_glimpse_size(inode, 0);
1307                 if (rc) 
1308                         RETURN(rc);
1309                 
1310                 attr->ia_valid |= ATTR_MTIME_SET | ATTR_MTIME;
1311                 attr->ia_mtime = inode->i_mtime;
1312         }
1313
1314         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1315                 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
1316                        LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1317                        CURRENT_SECONDS);
1318
1319         /* NB: ATTR_SIZE will only be set after this point if the size
1320          * resides on the MDS, ie, this file has no objects. */
1321         if (lsm)
1322                 attr->ia_valid &= ~ATTR_SIZE;
1323
1324         /* We always do an MDS RPC, even if we're only changing the size;
1325          * only the MDS knows whether truncate() should fail with -ETXTBUSY */
1326
1327         OBD_ALLOC_PTR(op_data);
1328         if (op_data == NULL)
1329                 RETURN(-ENOMEM);
1330
1331         memcpy(&op_data->op_attr, attr, sizeof(*attr));
1332
1333         /* Open epoch for truncate. */
1334         if ((ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) &&
1335             (ia_valid & ATTR_SIZE))
1336                 op_data->op_flags = MF_EPOCH_OPEN;
1337
1338         rc = ll_md_setattr(inode, op_data, &mod);
1339         if (rc)
1340                 GOTO(out, rc);
1341
1342         if (op_data->op_ioepoch)
1343                 CDEBUG(D_INODE, "Epoch "LPU64" opened on "DFID" for "
1344                        "truncate\n", op_data->op_ioepoch, PFID(&lli->lli_fid));
1345
1346         if (!lsm || !S_ISREG(inode->i_mode)) {
1347                 CDEBUG(D_INODE, "no lsm: not setting attrs on OST\n");
1348                 GOTO(out, rc = 0);
1349         }
1350
1351         /* We really need to get our PW lock before we change inode->i_size.
1352          * If we don't we can race with other i_size updaters on our node, like
1353          * ll_file_read.  We can also race with i_size propagation to other
1354          * nodes through dirtying and writeback of final cached pages.  This
1355          * last one is especially bad for racing O_APPEND users on other
1356          * nodes. */
1357         if (ia_valid & ATTR_SIZE) {
1358                 ldlm_policy_data_t policy = { .l_extent = {attr->ia_size,
1359                                                            OBD_OBJECT_EOF } };
1360                 struct lustre_handle lockh = { 0 };
1361                 int err, ast_flags = 0;
1362                 /* XXX when we fix the AST intents to pass the discard-range
1363                  * XXX extent, make ast_flags always LDLM_AST_DISCARD_DATA
1364                  * XXX here. */
1365                 if (attr->ia_size == 0)
1366                         ast_flags = LDLM_AST_DISCARD_DATA;
1367
1368                 UNLOCK_INODE_MUTEX(inode);
1369                 UP_WRITE_I_ALLOC_SEM(inode);
1370                 rc = ll_extent_lock(NULL, inode, lsm, LCK_PW, &policy, &lockh,
1371                                     ast_flags);
1372                 LOCK_INODE_MUTEX(inode);
1373                 DOWN_WRITE_I_ALLOC_SEM(inode);
1374
1375                 if (rc != 0)
1376                         GOTO(out, rc);
1377
1378                 /* Only ll_inode_size_lock is taken at this level.
1379                  * lov_stripe_lock() is grabbed by ll_truncate() only over
1380                  * call to obd_adjust_kms().  If vmtruncate returns 0, then
1381                  * ll_truncate dropped ll_inode_size_lock() */
1382                 ll_inode_size_lock(inode, 0);
1383                 rc = vmtruncate(inode, attr->ia_size);
1384                 if (rc != 0) {
1385                         LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
1386                         ll_inode_size_unlock(inode, 0);
1387                 }
1388
1389                 err = ll_extent_unlock(NULL, inode, lsm, LCK_PW, &lockh);
1390                 if (err) {
1391                         CERROR("ll_extent_unlock failed: %d\n", err);
1392                         if (!rc)
1393                                 rc = err;
1394                 }
1395         } else if (ia_valid & (ATTR_MTIME | ATTR_MTIME_SET)) {
1396                 obd_flag flags;
1397                 struct obd_info oinfo = { { { 0 } } };
1398                 struct obdo *oa;
1399
1400                 CDEBUG(D_INODE, "set mtime on OST inode %lu to %lu\n",
1401                        inode->i_ino, LTIME_S(attr->ia_mtime));
1402
1403                 OBDO_ALLOC(oa);
1404                 if (oa) {
1405                         oa->o_id = lsm->lsm_object_id;
1406                         oa->o_gr = lsm->lsm_object_gr;
1407                         oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
1408
1409                         flags = OBD_MD_FLTYPE | OBD_MD_FLATIME |
1410                                 OBD_MD_FLMTIME | OBD_MD_FLCTIME |
1411                                 OBD_MD_FLFID | OBD_MD_FLGENER | 
1412                                 OBD_MD_FLGROUP;
1413
1414                         obdo_from_inode(oa, inode, flags);
1415
1416                         oinfo.oi_oa = oa;
1417                         oinfo.oi_md = lsm;
1418                         oinfo.oi_capa = ll_mdscapa_get(inode);
1419
1420                         /* XXX: this looks unnecessary now. */
1421                         rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1422                         capa_put(oinfo.oi_capa);
1423                         if (rc)
1424                                 CERROR("obd_setattr_rqset fails: rc = %d\n", rc);
1425                         OBDO_FREE(oa);
1426                 } else {
1427                         rc = -ENOMEM;
1428                 }
1429         }
1430         EXIT;
1431 out:
1432         if (op_data) {
1433                 if (op_data->op_ioepoch)
1434                         rc1 = ll_setattr_done_writing(inode, op_data, mod);
1435                 ll_finish_md_op_data(op_data);
1436         }
1437         return rc ? rc : rc1;
1438 }
1439
1440 int ll_setattr(struct dentry *de, struct iattr *attr)
1441 {
1442         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1443             (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1444                 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1445
1446         return ll_setattr_raw(de->d_inode, attr);
1447 }
1448
1449 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1450                        __u64 max_age)
1451 {
1452         struct ll_sb_info *sbi = ll_s2sbi(sb);
1453         struct obd_statfs obd_osfs;
1454         int rc;
1455         ENTRY;
1456
1457         rc = obd_statfs(class_exp2obd(sbi->ll_md_exp), osfs, max_age);
1458         if (rc) {
1459                 CERROR("md_statfs fails: rc = %d\n", rc);
1460                 RETURN(rc);
1461         }
1462
1463         osfs->os_type = sb->s_magic;
1464
1465         CDEBUG(D_SUPER, "MDC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1466                osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,osfs->os_files);
1467
1468         rc = obd_statfs_rqset(class_exp2obd(sbi->ll_dt_exp),
1469                               &obd_osfs, max_age, 0);
1470         if (rc) {
1471                 CERROR("obd_statfs fails: rc = %d\n", rc);
1472                 RETURN(rc);
1473         }
1474
1475         CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1476                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1477                obd_osfs.os_files);
1478
1479         osfs->os_bsize = obd_osfs.os_bsize;
1480         osfs->os_blocks = obd_osfs.os_blocks;
1481         osfs->os_bfree = obd_osfs.os_bfree;
1482         osfs->os_bavail = obd_osfs.os_bavail;
1483
1484         /* If we don't have as many objects free on the OST as inodes
1485          * on the MDS, we reduce the total number of inodes to
1486          * compensate, so that the "inodes in use" number is correct.
1487          */
1488         if (obd_osfs.os_ffree < osfs->os_ffree) {
1489                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1490                         obd_osfs.os_ffree;
1491                 osfs->os_ffree = obd_osfs.os_ffree;
1492         }
1493
1494         RETURN(rc);
1495 }
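
A quick worked example of the compensation above, with made-up numbers (illustration only, not additional client code): if the MDS reports 1,000,000 total inodes with 900,000 free but the OSTs only have 500,000 free objects, the 100,000 inodes in use are preserved and the reported total shrinks to 600,000.

        __u64 files = 1000000, ffree = 900000;     /* MDS: os_files, os_ffree */
        __u64 ost_ffree = 500000;                  /* OST: obd_osfs.os_ffree  */

        if (ost_ffree < ffree) {
                files = (files - ffree) + ost_ffree;   /* 100000 used + 500000 */
                ffree = ost_ffree;                     /* -> files = 600000    */
        }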
1496 #ifndef HAVE_STATFS_DENTRY_PARAM
1497 int ll_statfs(struct super_block *sb, struct kstatfs *sfs)
1498 {
1499 #else
1500 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1501 {
1502         struct super_block *sb = de->d_sb;
1503 #endif
1504         struct obd_statfs osfs;
1505         int rc;
1506
1507         CDEBUG(D_VFSTRACE, "VFS Op: at "LPU64" jiffies\n", get_jiffies_64());
1508         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1509
1510         /* For now we will always get up-to-date statfs values, but in the
1511          * future we may allow some amount of caching on the client (e.g.
1512          * from QOS or lprocfs updates). */
1513         rc = ll_statfs_internal(sb, &osfs, cfs_time_current_64() - 1);
1514         if (rc)
1515                 return rc;
1516
1517         statfs_unpack(sfs, &osfs);
1518
1519         /* We need to downshift for all 32-bit kernels, because we can't
1520          * tell whether the kernel is being called via sys_statfs64() or not.
1521          * Stop before overflowing f_bsize; past that point it is better to
1522          * just risk EOVERFLOW if the caller is using the old sys_statfs(). */
1523         if (sizeof(long) < 8) {
1524                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1525                         sfs->f_bsize <<= 1;
1526
1527                         osfs.os_blocks >>= 1;
1528                         osfs.os_bfree >>= 1;
1529                         osfs.os_bavail >>= 1;
1530                 }
1531         }
1532
1533         sfs->f_blocks = osfs.os_blocks;
1534         sfs->f_bfree = osfs.os_bfree;
1535         sfs->f_bavail = osfs.os_bavail;
1536
1537         return 0;
1538 }
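
The downshift loop above halves the block counts while doubling f_bsize, so the byte totals (blocks * bsize) are preserved while the counts shrink until they fit in a 32-bit unsigned long, giving up once f_bsize reaches 1 GiB rather than overflow f_bsize itself. A minimal sketch of that invariant with hypothetical numbers, where 0xffffffffULL stands in for the 32-bit ~0UL:

        __u64 blocks = 0x200000000ULL;  /* 2^33 blocks of 4 KiB = 32 TiB */
        long bsize = 4096;

        while (blocks > 0xffffffffULL && bsize < 0x40000000) {
                bsize <<= 1;            /* 4 KiB -> 8 KiB -> 16 KiB      */
                blocks >>= 1;           /* 2^33  -> 2^32  -> 2^31        */
        }
        /* blocks == 0x80000000, bsize == 16384: still 32 TiB in total */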
1539
1540 void ll_inode_size_lock(struct inode *inode, int lock_lsm)
1541 {
1542         struct ll_inode_info *lli;
1543         struct lov_stripe_md *lsm;
1544
1545         lli = ll_i2info(inode);
1546         LASSERT(lli->lli_size_sem_owner != current);
1547         down(&lli->lli_size_sem);
1548         LASSERT(lli->lli_size_sem_owner == NULL);
1549         lli->lli_size_sem_owner = current;
1550         lsm = lli->lli_smd;
1551         LASSERTF(lsm != NULL || lock_lsm == 0, "lsm %p, lock_lsm %d\n",
1552                  lsm, lock_lsm);
1553         if (lock_lsm)
1554                 lov_stripe_lock(lsm);
1555 }
1556
1557 void ll_inode_size_unlock(struct inode *inode, int unlock_lsm)
1558 {
1559         struct ll_inode_info *lli;
1560         struct lov_stripe_md *lsm;
1561
1562         lli = ll_i2info(inode);
1563         lsm = lli->lli_smd;
1564         LASSERTF(lsm != NULL || unlock_lsm == 0, "lsm %p, unlock_lsm %d\n",
1565                  lsm, unlock_lsm);
1566         if (unlock_lsm)
1567                 lov_stripe_unlock(lsm);
1568         LASSERT(lli->lli_size_sem_owner == current);
1569         lli->lli_size_sem_owner = NULL;
1570         up(&lli->lli_size_sem);
1571 }
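
The pair above implements an owner-tracked size semaphore; a non-zero lsm argument additionally takes lov_stripe_lock() so i_size and the KMS can be updated consistently. A minimal caller sketch, assuming lli_smd is already set and new_size is a hypothetical value the caller has computed:

        ll_inode_size_lock(inode, 1);       /* lli_size_sem + lov_stripe_lock() */
        if (new_size > inode->i_size)
                inode->i_size = new_size;   /* old-style assignment, as above   */
        ll_inode_size_unlock(inode, 1);     /* stripe lock first, then the sem  */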
1572
1573 static void ll_replace_lsm(struct inode *inode, struct lov_stripe_md *lsm)
1574 {
1575         struct ll_inode_info *lli = ll_i2info(inode);
1576
1577         dump_lsm(D_INODE, lsm);
1578         dump_lsm(D_INODE, lli->lli_smd);
1579         LASSERTF(lsm->lsm_magic == LOV_MAGIC_JOIN,
1580                  "lsm %p must be a joined lsm\n", lsm);
1581         obd_free_memmd(ll_i2dtexp(inode), &lli->lli_smd);
1582         CDEBUG(D_INODE, "replace lsm %p to lli_smd %p for inode %lu/%u(%p)\n",
1583                lsm, lli->lli_smd, inode->i_ino, inode->i_generation, inode);
1584         lli->lli_smd = lsm;
1585         lli->lli_maxbytes = lsm->lsm_maxbytes;
1586         if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
1587                 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
1588 }
1589
1590 void ll_update_inode(struct inode *inode, struct lustre_md *md)
1591 {
1592         struct ll_inode_info *lli = ll_i2info(inode);
1593         struct mdt_body *body = md->body;
1594         struct lov_stripe_md *lsm = md->lsm;
1595         struct ll_sb_info *sbi = ll_i2sbi(inode);
1596
1597         LASSERT ((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
1598         if (lsm != NULL) {
1599                 if (lli->lli_smd == NULL) {
1600                         if (lsm->lsm_magic != LOV_MAGIC &&
1601                             lsm->lsm_magic != LOV_MAGIC_JOIN) {
1602                                 dump_lsm(D_ERROR, lsm);
1603                                 LBUG();
1604                         }
1605                         CDEBUG(D_INODE, "adding lsm %p to inode %lu/%u(%p)\n",
1606                                lsm, inode->i_ino, inode->i_generation, inode);
1607                         /* ll_inode_size_lock() requires that it only be
1608                          * called with lli_smd != NULL or with lock_lsm == 0,
1609                          * otherwise we can race between lock/unlock.  bug 9547 */
1610                         lli->lli_smd = lsm;
1611                         lli->lli_maxbytes = lsm->lsm_maxbytes;
1612                         if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
1613                                 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
1614                 } else {
1615                         if (lli->lli_smd->lsm_magic == lsm->lsm_magic &&
1616                              lli->lli_smd->lsm_stripe_count ==
1617                                         lsm->lsm_stripe_count) {
1618                                 if (lov_stripe_md_cmp(lli->lli_smd, lsm)) {
1619                                         CERROR("lsm mismatch for inode %lu\n",
1620                                                 inode->i_ino);
1621                                         CERROR("lli_smd:\n");
1622                                         dump_lsm(D_ERROR, lli->lli_smd);
1623                                         CERROR("lsm:\n");
1624                                         dump_lsm(D_ERROR, lsm);
1625                                         LBUG();
1626                                 }
1627                         } else
1628                                 ll_replace_lsm(inode, lsm);
1629                 }
1630                 if (lli->lli_smd != lsm)
1631                         obd_free_memmd(ll_i2dtexp(inode), &lsm);
1632         }
1633
1634         if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1635                 if (body->valid & OBD_MD_FLRMTPERM)
1636                         ll_update_remote_perm(inode, md->remote_perm);
1637         }
1638 #ifdef CONFIG_FS_POSIX_ACL
1639         else if (body->valid & OBD_MD_FLACL) {
1640                 spin_lock(&lli->lli_lock);
1641                 if (lli->lli_posix_acl)
1642                         posix_acl_release(lli->lli_posix_acl);
1643                 lli->lli_posix_acl = md->posix_acl;
1644                 spin_unlock(&lli->lli_lock);
1645         }
1646 #endif
1647         if (body->valid & OBD_MD_FLATIME &&
1648             body->atime > LTIME_S(inode->i_atime))
1649                 LTIME_S(inode->i_atime) = body->atime;
1650         
1651         /* mtime is always updated along with ctime, but it can be set in
1652            the past.  A write and a utime(2) call may happen within the same
1653            second, and utime's mtime takes priority over the write's, so take
1654            the mtime from the MDS when the ctimes are equal. */
1655         if (body->valid & OBD_MD_FLCTIME &&
1656             body->ctime >= LTIME_S(inode->i_ctime)) {
1657                 LTIME_S(inode->i_ctime) = body->ctime;
1658                 if (body->valid & OBD_MD_FLMTIME) {
1659                         CDEBUG(D_INODE, "setting ino %lu mtime "
1660                                "from %lu to "LPU64"\n", inode->i_ino, 
1661                                LTIME_S(inode->i_mtime), body->mtime);
1662                         LTIME_S(inode->i_mtime) = body->mtime;
1663                 }
1664         }
1665         if (body->valid & OBD_MD_FLMODE)
1666                 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
1667         if (body->valid & OBD_MD_FLTYPE)
1668                 inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
1669         if (S_ISREG(inode->i_mode)) {
1670                 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
1671         } else {
1672                 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1673         }
1674 #ifdef HAVE_INODE_BLKSIZE
1675         inode->i_blksize = 1<<inode->i_blkbits;
1676 #endif
1677         if (body->valid & OBD_MD_FLUID)
1678                 inode->i_uid = body->uid;
1679         if (body->valid & OBD_MD_FLGID)
1680                 inode->i_gid = body->gid;
1681         if (body->valid & OBD_MD_FLFLAGS)
1682                 inode->i_flags = ll_ext_to_inode_flags(body->flags);
1683         if (body->valid & OBD_MD_FLNLINK)
1684                 inode->i_nlink = body->nlink;
1685         if (body->valid & OBD_MD_FLRDEV)
1686                 inode->i_rdev = old_decode_dev(body->rdev);
1687
1688         if (body->valid & OBD_MD_FLID) {
1689                 /* FID shouldn't be changed! */
1690                 if (fid_is_sane(&lli->lli_fid)) {
1691                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
1692                                  "Trying to change FID "DFID
1693                                  " to the "DFID", inode %lu/%u(%p)\n",
1694                                  PFID(&lli->lli_fid), PFID(&body->fid1),
1695                                  inode->i_ino, inode->i_generation, inode);
1696                 } else 
1697                         lli->lli_fid = body->fid1;
1698         }
1699
1700         LASSERT(fid_seq(&lli->lli_fid) != 0);
1701
1702         if (body->valid & OBD_MD_FLSIZE) {
1703                 if ((ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) &&
1704                     S_ISREG(inode->i_mode) && lli->lli_smd) {
1705                         struct lustre_handle lockh;
1706                         ldlm_mode_t mode;
1707                         
1708                         /* A blocking AST may already have been processed by
1709                          * this time, so check that the client still holds an
1710                          * UPDATE lock and set LLIF_MDS_SIZE_LOCK while
1711                          * holding it. */
1712                         mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1713                                                &lockh);
1714                         if (mode) {
1715                                 if (lli->lli_flags & (LLIF_DONE_WRITING |
1716                                                       LLIF_EPOCH_PENDING |
1717                                                       LLIF_SOM_DIRTY)) {
1718                                         CERROR("ino %lu flags %lu still has "
1719                                                "size authority! do not trust "
1720                                                "the size received from the MDS\n",
1721                                                inode->i_ino, lli->lli_flags);
1722                                 } else {
1723                                         /* Use old size assignment to avoid
1724                                          * deadlock bz14138 & bz14326 */
1725                                         inode->i_size = body->size;
1726                                         lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1727                                 }
1728                                 ldlm_lock_decref(&lockh, mode);
1729                         }
1730                 } else {
1731                         /* Use old size assignment to avoid
1732                          * deadlock bz14138 & bz14326 */
1733                         inode->i_size = body->size;
1734                 }
1735
1736                 if (body->valid & OBD_MD_FLBLOCKS)
1737                         inode->i_blocks = body->blocks;
1738         }
1739
1740         if (body->valid & OBD_MD_FLMDSCAPA) {
1741                 LASSERT(md->mds_capa);
1742                 ll_add_capa(inode, md->mds_capa);
1743         }
1744         if (body->valid & OBD_MD_FLOSSCAPA) {
1745                 LASSERT(md->oss_capa);
1746                 ll_add_capa(inode, md->oss_capa);
1747         }
1748 }
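
One detail of ll_update_inode() worth spelling out: OBD_MD_FLMODE replaces only the permission bits and OBD_MD_FLTYPE replaces only the S_IFMT type bits, so a reply carrying one of them cannot clobber the other half of i_mode. A tiny sketch with hypothetical values:

        umode_t mode = S_IFREG | 0644;      /* current i_mode         */
        umode_t body_mode = 0600;           /* mode bits from the MDS */

        mode = (mode & S_IFMT) | (body_mode & ~S_IFMT);   /* S_IFREG | 0600 */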
1749
1750 static struct backing_dev_info ll_backing_dev_info = {
1751         .ra_pages       = 0,    /* No readahead */
1752 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
1753         .capabilities   = 0,    /* Does contribute to dirty memory */
1754 #else
1755         .memory_backed  = 0,    /* Does contribute to dirty memory */
1756 #endif
1757 };
1758
1759 void ll_read_inode2(struct inode *inode, void *opaque)
1760 {
1761         struct lustre_md *md = opaque;
1762         struct ll_inode_info *lli = ll_i2info(inode);
1763         ENTRY;
1764
1765         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
1766                inode->i_ino, inode->i_generation, inode);
1767
1768         ll_lli_init(lli);
1769
1770         LASSERT(!lli->lli_smd);
1771
1772         /* Core attributes from the MDS first.  This is a new inode, and
1773          * the VFS doesn't zero times in the core inode so we have to do
1774          * it ourselves.  They will be overwritten by either MDS or OST
1775          * attributes - we just need to make sure they aren't newer. */
1776         LTIME_S(inode->i_mtime) = 0;
1777         LTIME_S(inode->i_atime) = 0;
1778         LTIME_S(inode->i_ctime) = 0;
1779         inode->i_rdev = 0;
1780         ll_update_inode(inode, md);
1781
1782         /* OIDEBUG(inode); */
1783
1784         if (S_ISREG(inode->i_mode)) {
1785                 struct ll_sb_info *sbi = ll_i2sbi(inode);
1786                 inode->i_op = &ll_file_inode_operations;
1787                 inode->i_fop = sbi->ll_fop;
1788                 inode->i_mapping->a_ops = &ll_aops;
1789                 EXIT;
1790         } else if (S_ISDIR(inode->i_mode)) {
1791                 inode->i_op = &ll_dir_inode_operations;
1792                 inode->i_fop = &ll_dir_operations;
1793                 inode->i_mapping->a_ops = &ll_dir_aops;
1794                 EXIT;
1795         } else if (S_ISLNK(inode->i_mode)) {
1796                 inode->i_op = &ll_fast_symlink_inode_operations;
1797                 EXIT;
1798         } else {
1799                 inode->i_op = &ll_special_inode_operations;
1800
1801                 init_special_inode(inode, inode->i_mode,
1802                                    kdev_t_to_nr(inode->i_rdev));
1803
1804                 /* initializing backing dev info. */
1805                 inode->i_mapping->backing_dev_info = &ll_backing_dev_info;
1806
1807                 EXIT;
1808         }
1809 }
1810
1811 void ll_delete_inode(struct inode *inode)
1812 {
1813         struct ll_sb_info *sbi = ll_i2sbi(inode);
1814         int rc;
1815         ENTRY;
1816
1817         rc = obd_fid_delete(sbi->ll_md_exp, ll_inode2fid(inode));
1818         if (rc) {
1819                 CERROR("fid_delete() failed, rc %d\n", rc);
1820         }
1821         clear_inode(inode);
1822
1823         EXIT;
1824 }
1825
1826 int ll_iocontrol(struct inode *inode, struct file *file,
1827                  unsigned int cmd, unsigned long arg)
1828 {
1829         struct ll_sb_info *sbi = ll_i2sbi(inode);
1830         struct ptlrpc_request *req = NULL;
1831         int rc, flags = 0;
1832         ENTRY;
1833
1834         switch(cmd) {
1835         case EXT3_IOC_GETFLAGS: {
1836                 struct mdt_body *body;
1837                 struct obd_capa *oc;
1838
1839                 oc = ll_mdscapa_get(inode);
1840                 rc = md_getattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
1841                                 OBD_MD_FLFLAGS, 0, &req);
1842                 capa_put(oc);
1843                 if (rc) {
1844                         CERROR("failure %d inode %lu\n", rc, inode->i_ino);
1845                         RETURN(-abs(rc));
1846                 }
1847
1848                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1849
1850                 flags = body->flags;
1851
1852                 ptlrpc_req_finished(req);
1853
1854                 RETURN(put_user(flags, (int *)arg));
1855         }
1856         case EXT3_IOC_SETFLAGS: {
1857                 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1858                 struct obd_info oinfo = { { { 0 } } };
1859                 struct md_op_data *op_data;
1860
1861                 if (get_user(flags, (int *)arg))
1862                         RETURN(-EFAULT);
1863
1864                 oinfo.oi_md = lsm;
1865                 OBDO_ALLOC(oinfo.oi_oa);
1866                 if (!oinfo.oi_oa)
1867                         RETURN(-ENOMEM);
1868
1869                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1870                                              LUSTRE_OPC_ANY, NULL);
1871                 if (IS_ERR(op_data))
1872                         RETURN(PTR_ERR(op_data));
1873
1874                 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
1875                 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1876                 rc = md_setattr(sbi->ll_md_exp, op_data,
1877                                 NULL, 0, NULL, 0, &req, NULL);
1878                 ll_finish_md_op_data(op_data);
1879                 ptlrpc_req_finished(req);
1880                 if (rc || lsm == NULL) {
1881                         OBDO_FREE(oinfo.oi_oa);
1882                         RETURN(rc);
1883                 }
1884
1885                 oinfo.oi_oa->o_id = lsm->lsm_object_id;
1886                 oinfo.oi_oa->o_gr = lsm->lsm_object_gr;
1887                 oinfo.oi_oa->o_flags = flags;
1888                 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS | 
1889                                        OBD_MD_FLGROUP;
1890                 oinfo.oi_capa = ll_mdscapa_get(inode);
1891
1892                 obdo_from_inode(oinfo.oi_oa, inode,
1893                                 OBD_MD_FLFID | OBD_MD_FLGENER);
1894                 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1895                 capa_put(oinfo.oi_capa);
1896                 OBDO_FREE(oinfo.oi_oa);
1897                 if (rc) {
1898                         if (rc != -EPERM && rc != -EACCES)
1899                                 CERROR("obd_setattr_rqset fails: rc = %d\n", rc);
1900                         RETURN(rc);
1901                 }
1902
1903                 inode->i_flags = ll_ext_to_inode_flags(flags |
1904                                                        MDS_BFLAG_EXT_FLAGS);
1905                 RETURN(0);
1906         }
1907         default:
1908                 RETURN(-ENOSYS);
1909         }
1910
1911         RETURN(0);
1912 }
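
For reference, the EXT3_IOC_GETFLAGS branch above can be exercised from user space with the generic flags ioctl; the sketch below assumes EXT3_IOC_GETFLAGS carries the same ioctl number that user-space headers expose as FS_IOC_GETFLAGS, and that the path to test is given as argv[1] (error handling trimmed):

        #include <stdio.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/fs.h>

        int main(int argc, char **argv)
        {
                int flags = 0;                  /* the handler put_user()s an int */
                int fd = open(argv[1], O_RDONLY);

                if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
                        return 1;
                printf("flags: %#x\n", flags);
                close(fd);
                return 0;
        }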
1913
1914 int ll_flush_ctx(struct inode *inode)
1915 {
1916         struct ll_sb_info  *sbi = ll_i2sbi(inode);
1917
1918         CDEBUG(D_SEC, "flush context for user %d\n", current->uid);
1919
1920         obd_set_info_async(sbi->ll_md_exp,
1921                            sizeof(KEY_FLUSH_CTX) - 1, KEY_FLUSH_CTX,
1922                            0, NULL, NULL);
1923         obd_set_info_async(sbi->ll_dt_exp,
1924                            sizeof(KEY_FLUSH_CTX) - 1, KEY_FLUSH_CTX,
1925                            0, NULL, NULL);
1926         return 0;
1927 }
1928
1929 /* umount -f client means force down, don't save state */
1930 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
1931 void ll_umount_begin(struct vfsmount *vfsmnt, int flags)
1932 {
1933         struct super_block *sb = vfsmnt->mnt_sb;
1934 #else
1935 void ll_umount_begin(struct super_block *sb)
1936 {
1937 #endif
1938         struct lustre_sb_info *lsi = s2lsi(sb);
1939         struct ll_sb_info *sbi = ll_s2sbi(sb);
1940         struct obd_device *obd;
1941         struct obd_ioctl_data ioc_data = { 0 };
1942         ENTRY;
1943
1944 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
1945         if (!(flags & MNT_FORCE)) {
1946                 EXIT;
1947                 return;
1948         }
1949 #endif
1950
1951         /* Tell the MGC we got umount -f */
1952         lsi->lsi_flags |= LSI_UMOUNT_FORCE;
1953
1954         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
1955                sb->s_count, atomic_read(&sb->s_active));
1956
1957         obd = class_exp2obd(sbi->ll_md_exp);
1958         if (obd == NULL) {
1959                 CERROR("Invalid MDC connection handle "LPX64"\n",
1960                        sbi->ll_md_exp->exp_handle.h_cookie);
1961                 EXIT;
1962                 return;
1963         }
1964         obd->obd_force = 1;
1965         obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp, sizeof ioc_data,
1966                       &ioc_data, NULL);
1967
1968         obd = class_exp2obd(sbi->ll_dt_exp);
1969         if (obd == NULL) {
1970                 CERROR("Invalid LOV connection handle "LPX64"\n",
1971                        sbi->ll_dt_exp->exp_handle.h_cookie);
1972                 EXIT;
1973                 return;
1974         }
1975
1976         obd->obd_force = 1;
1977         obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp, sizeof ioc_data,
1978                       &ioc_data, NULL);
1979
1980         /* Really, we'd like to wait until there are no requests outstanding,
1981          * and then continue.  For now, we just invalidate the requests,
1982          * schedule, and hope.
1983          */
1984         schedule();
1985
1986         EXIT;
1987 }
1988
1989 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
1990 {
1991         struct ll_sb_info *sbi = ll_s2sbi(sb);
1992         int err;
1993         __u32 read_only;
1994
1995         if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
1996                 read_only = *flags & MS_RDONLY;
1997                 err = obd_set_info_async(sbi->ll_md_exp,
1998                                          sizeof(KEY_READ_ONLY) - 1,
1999                                          KEY_READ_ONLY, sizeof(read_only),
2000                                          &read_only, NULL);
2001                 if (err) {
2002                         CERROR("Failed to change the read-only flag during "
2003                                "remount: %d\n", err);
2004                         return err;
2005                 }
2006
2007                 if (read_only)
2008                         sb->s_flags |= MS_RDONLY;
2009                 else
2010                         sb->s_flags &= ~MS_RDONLY;
2011         }
2012         return 0;
2013 }
2014
2015 int ll_prep_inode(struct inode **inode,
2016                   struct ptlrpc_request *req,
2017                   struct super_block *sb)
2018 {
2019         struct ll_sb_info *sbi = NULL;
2020         struct lustre_md md;
2021         int rc = 0;
2022         ENTRY;
2023
2024         LASSERT(*inode || sb);
2025         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2026         prune_deathrow(sbi, 1);
2027         memset(&md, 0, sizeof(struct lustre_md));
2028
2029         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2030                               sbi->ll_md_exp, &md);
2031         if (rc)
2032                 RETURN(rc);
2033
2034         if (*inode) {
2035                 ll_update_inode(*inode, &md);
2036         } else {
2037                 LASSERT(sb != NULL);
2038
2039                 /*
2040                  * At this point the server returns the same fid the client
2041                  * generated for the create, so using ->fid1 is okay here.
2042                  */
2043                 LASSERT(fid_is_sane(&md.body->fid1));
2044
2045                 *inode = ll_iget(sb, ll_fid_build_ino(sbi, &md.body->fid1), &md);
2046                 if (*inode == NULL || is_bad_inode(*inode)) {
2047                         if (md.lsm)
2048                                 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2049 #ifdef CONFIG_FS_POSIX_ACL
2050                         if (md.posix_acl) {
2051                                 posix_acl_release(md.posix_acl);
2052                                 md.posix_acl = NULL;
2053                         }
2054 #endif
2055                         rc = -ENOMEM;
2056                         CERROR("new_inode - fatal: rc %d\n", rc);
2057                         GOTO(out, rc);
2058                 }
2059         }
2060
2061         rc = obd_checkmd(sbi->ll_dt_exp, sbi->ll_md_exp,
2062                          ll_i2info(*inode)->lli_smd);
2063 out:
2064         md_free_lustre_md(sbi->ll_md_exp, &md);
2065         RETURN(rc);
2066 }
2067
2068 char *llap_origins[] = {
2069         [LLAP_ORIGIN_UNKNOWN] = "--",
2070         [LLAP_ORIGIN_READPAGE] = "rp",
2071         [LLAP_ORIGIN_READAHEAD] = "ra",
2072         [LLAP_ORIGIN_COMMIT_WRITE] = "cw",
2073         [LLAP_ORIGIN_WRITEPAGE] = "wp",
2074 };
2075
2076 struct ll_async_page *llite_pglist_next_llap(struct ll_sb_info *sbi,
2077                                              struct list_head *list)
2078 {
2079         struct ll_async_page *llap;
2080         struct list_head *pos;
2081
2082         list_for_each(pos, list) {
2083                 if (pos == &sbi->ll_pglist)
2084                         return NULL;
2085                 llap = list_entry(pos, struct ll_async_page, llap_pglist_item);
2086                 if (llap->llap_page == NULL)
2087                         continue;
2088                 return llap;
2089         }
2090         LBUG();
2091         return NULL;
2092 }
2093
2094 int ll_obd_statfs(struct inode *inode, void *arg)
2095 {
2096         struct ll_sb_info *sbi = NULL;
2097         struct obd_export *exp;
2098         char *buf = NULL;
2099         struct obd_ioctl_data *data = NULL;
2100         __u32 type;
2101         int len = 0, rc;
2102
2103         if (!inode || !(sbi = ll_i2sbi(inode)))
2104                 GOTO(out_statfs, rc = -EINVAL);
2105
2106         rc = obd_ioctl_getdata(&buf, &len, arg);
2107         if (rc)
2108                 GOTO(out_statfs, rc);
2109
2110         data = (void*)buf;
2111         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2112             !data->ioc_pbuf1 || !data->ioc_pbuf2)
2113                 GOTO(out_statfs, rc = -EINVAL);
2114
2115         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2116         if (type == LL_STATFS_MDC)
2117                 exp = sbi->ll_md_exp;
2118         else if (type == LL_STATFS_LOV)
2119                 exp = sbi->ll_dt_exp;
2120         else 
2121                 GOTO(out_statfs, rc = -ENODEV);
2122
2123         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2124         if (rc)
2125                 GOTO(out_statfs, rc);
2126 out_statfs:
2127         if (buf)
2128                 obd_ioctl_freedata(buf, len);
2129         return rc;
2130 }
2131
2132 int ll_process_config(struct lustre_cfg *lcfg)
2133 {
2134         char *ptr;
2135         void *sb;
2136         struct lprocfs_static_vars lvars;
2137         unsigned long x; 
2138         int rc = 0;
2139
2140         lprocfs_llite_init_vars(&lvars);
2141
2142         /* The instance name contains the sb: lustre-client-aacfe000 */
2143         ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2144         if (!ptr || !*(++ptr)) 
2145                 return -EINVAL;
2146         if (sscanf(ptr, "%lx", &x) != 1)
2147                 return -EINVAL;
2148         sb = (void *)x;
2149         /* This better be a real Lustre superblock! */
2150         LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2151
2152         /* Note we have not called client_common_fill_super yet, so 
2153            proc fns must be able to handle that! */
2154         rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2155                                       lcfg, sb);
2156         return(rc);
2157 }
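
The instance-name parsing above recovers the superblock pointer from the hex suffix of the obd name; a standalone sketch using the example name from the comment (illustration only, user-space string functions assumed):

        char name[] = "lustre-client-aacfe000";
        unsigned long x;
        void *sb = NULL;
        char *ptr = strrchr(name, '-');        /* points at "-aacfe000" */

        if (ptr && *(++ptr) && sscanf(ptr, "%lx", &x) == 1)
                sb = (void *)x;                /* sb == (void *)0xaacfe000 */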
2158
2159 /* This function prepares the md_op_data hint passed down to the MD stack. */
2160 struct md_op_data * ll_prep_md_op_data(struct md_op_data *op_data,
2161                                        struct inode *i1, struct inode *i2,
2162                                        const char *name, int namelen,
2163                                        int mode, __u32 opc, void *data)
2164 {
2165         LASSERT(i1 != NULL);
2166
2167         if (namelen > ll_i2sbi(i1)->ll_namelen)
2168                 return ERR_PTR(-ENAMETOOLONG);
2169         
2170         if (op_data == NULL)
2171                 OBD_ALLOC_PTR(op_data);
2172         
2173         if (op_data == NULL)
2174                 return ERR_PTR(-ENOMEM);
2175
2176         ll_i2gids(op_data->op_suppgids, i1, i2);
2177         op_data->op_fid1 = *ll_inode2fid(i1);
2178         op_data->op_capa1 = ll_mdscapa_get(i1);
2179
2180         if (i2) {
2181                 op_data->op_fid2 = *ll_inode2fid(i2);
2182                 op_data->op_capa2 = ll_mdscapa_get(i2);
2183         } else {
2184                 fid_zero(&op_data->op_fid2);
2185         }
2186
2187         op_data->op_name = name;
2188         op_data->op_namelen = namelen;
2189         op_data->op_mode = mode;
2190         op_data->op_mod_time = CURRENT_SECONDS;
2191         op_data->op_fsuid = current->fsuid;
2192         op_data->op_fsgid = current->fsgid;
2193         op_data->op_cap = current->cap_effective;
2194         op_data->op_bias = MDS_CHECK_SPLIT;
2195         op_data->op_opc = opc;
2196         op_data->op_mds = 0;
2197         op_data->op_data = data;
2198
2199         return op_data;
2200 }
2201
2202 void ll_finish_md_op_data(struct md_op_data *op_data)
2203 {
2204         capa_put(op_data->op_capa1);
2205         capa_put(op_data->op_capa2);
2206         OBD_FREE_PTR(op_data);
2207 }
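
For callers elsewhere in llite, these two helpers bracket every MD request: prepare the op_data, issue the request, then release the capas and free it. The pattern below mirrors the EXT3_IOC_SETFLAGS path earlier in this file, using the same names it uses there:

        struct md_op_data *op_data;
        struct ptlrpc_request *req = NULL;
        int rc;

        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
                                     LUSTRE_OPC_ANY, NULL);
        if (IS_ERR(op_data))
                RETURN(PTR_ERR(op_data));

        rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0, &req, NULL);
        ll_finish_md_op_data(op_data);      /* drops capas, frees op_data */
        ptlrpc_req_finished(req);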