1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
7 * Author: Andreas Dilger <adilger@clusterfs.com>
9 * This file is part of Lustre, http://www.lustre.org.
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #define DEBUG_SUBSYSTEM S_LLITE
26 #include <linux/lustre_dlm.h>
27 #include <linux/lustre_lite.h>
28 #include <linux/obd_lov.h> /* for lov_mds_md_size() in lov_setstripe() */
29 #include <linux/random.h>
30 #include <linux/pagemap.h>
31 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
32 #include <linux/lustre_compat25.h>
35 int ll_inode_setattr(struct inode *inode, struct iattr *attr, int do_trunc);
36 extern int ll_setattr(struct dentry *de, struct iattr *attr);
/* ll_mdc_close(): release the MDS open handle held in the file's
 * ll_file_data.  Sends the MDC close RPC, then decides how the saved open
 * request participates in recovery replay: if the open has a transno (it
 * created the file) the close is retained for replay alongside it,
 * otherwise the reference on the open request is simply dropped.
 * NOTE(review): this view of the file has lines elided (the `file`
 * parameter line, locals `rc`/`flags`, braces, RETURN paths); comments
 * below describe only the visible code. */
38 static int ll_mdc_close(struct lustre_handle *mdc_conn, struct inode *inode,
41 struct ll_file_data *fd = file->private_data;
42 struct ptlrpc_request *req = NULL;
44 struct obd_import *imp;
48 /* Complete the open request and remove it from replay list */
49 rc = mdc_close(&ll_i2sbi(inode)->ll_mdc_conn, inode->i_ino,
50 inode->i_mode, &fd->fd_mds_och.och_fh, &req);
52 CERROR("inode %lu close failed: rc = %d\n", inode->i_ino, rc);
54 imp = fd->fd_mds_och.och_req->rq_import;
56 spin_lock_irqsave(&imp->imp_lock, flags);
58 DEBUG_REQ(D_HA, fd->fd_mds_och.och_req, "matched open req %p",
59 fd->fd_mds_och.och_req);
61 /* We held on to the request for replay until we saw a close for that
62 * file. Now that we've closed it, it gets replayed on the basis of
63 * its transno only. */
64 spin_lock (&fd->fd_mds_och.och_req->rq_lock);
65 fd->fd_mds_och.och_req->rq_replay = 0;
66 spin_unlock (&fd->fd_mds_och.och_req->rq_lock);
68 if (fd->fd_mds_och.och_req->rq_transno) {
69 /* This open created a file, so it needs replay as a
70 * normal transaction now. Our reference to it now
71 * effectively owned by the imp_replay_list, and it'll
72 * be committed just like other transno-having
73 * requests from here on out. */
75 /* We now retain this close request, so that it is
76 * replayed if the open is replayed. We duplicate the
77 * transno, so that we get freed at the right time,
78 * and rely on the difference in xid to keep
79 * everything ordered correctly.
81 * But! If this close was already given a transno
82 * (because it caused real unlinking of an
83 * open-unlinked file, f.e.), then we'll be ordered on
84 * the basis of that and we don't need to do anything
86 if (!req->rq_transno) {
87 req->rq_transno = fd->fd_mds_och.och_req->rq_transno;
88 ptlrpc_retain_replayable_request(req, imp);
90 spin_unlock_irqrestore(&imp->imp_lock, flags);
92 /* Should we free_committed now? we always free before
93 * replay, so it's probably a wash. We could check to
94 * see if the fd_req should already be committed, in
95 * which case we can avoid the whole retain_replayable
98 /* No transno means that we can just drop our ref. */
99 spin_unlock_irqrestore(&imp->imp_lock, flags);
101 ptlrpc_req_finished(fd->fd_mds_och.och_req);
103 /* Do this after the fd_req->rq_transno check, because we don't want
104 * to bounce off zero references. */
105 ptlrpc_req_finished(req);
/* Poison the handle and tear down the per-file state. */
106 fd->fd_mds_och.och_fh.cookie = DEAD_HANDLE_MAGIC;
107 file->private_data = NULL;
108 OBD_SLAB_FREE(fd, ll_file_data_slab, sizeof *fd);
113 /* While this returns an error code, fput() the caller does not, so we need
114 * to make every effort to clean up all of our state here. Also, applications
115 * rarely check close errors and even if an error is returned they will not
116 * re-try the close call.
/* ll_file_release(): ->release() handler.  For regular files it flushes
 * the write cache, closes the OST objects via obd_close(), then closes
 * the MDS handle via ll_mdc_close().  NOTE(review): lines are elided in
 * this view (declarations of oa/rc/rc2, braces, RETURN paths). */
118 int ll_file_release(struct inode *inode, struct file *file)
120 struct ll_file_data *fd;
122 struct ll_sb_info *sbi = ll_i2sbi(inode);
123 struct ll_inode_info *lli = ll_i2info(inode);
124 struct lov_stripe_md *lsm = lli->lli_smd;
128 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
129 inode->i_generation, inode);
131 /* don't do anything for / */
132 if (inode->i_sb->s_root == file->f_dentry)
135 fd = (struct ll_file_data *)file->private_data;
136 if (!fd) /* no process opened the file after an mcreate */
139 /* we might not be able to get a valid handle on this file
140 * again so we really want to flush our write cache.. */
141 if (S_ISREG(inode->i_mode)) {
142 filemap_fdatasync(inode->i_mapping);
143 filemap_fdatawait(inode->i_mapping);
/* Close the OST objects, passing our OST open handle so the OST can
 * match this close to our open. */
146 memset(&oa, 0, sizeof(oa));
147 oa.o_id = lsm->lsm_object_id;
149 oa.o_valid = OBD_MD_FLTYPE | OBD_MD_FLID;
151 memcpy(&oa.o_inline, &fd->fd_ost_och, FD_OSTDATA_SIZE);
152 oa.o_valid |= OBD_MD_FLHANDLE;
154 rc = obd_close(&sbi->ll_osc_conn, &oa, lsm, NULL);
156 CERROR("inode %lu object close failed: rc = "
157 "%d\n", inode->i_ino, rc);
/* Always close the MDS handle, even if the OST close failed. */
161 rc2 = ll_mdc_close(&sbi->ll_mdc_conn, inode, file);
/* ll_local_open(): allocate this file's ll_file_data from the slab and
 * fill it from the mds_body in the intent's open reply: the MDS file
 * handle and the open request itself (kept for close/replay).
 * NOTE(review): allocation-failure handling lines are elided here. */
168 static int ll_local_open(struct file *file, struct lookup_intent *it)
170 struct ptlrpc_request *req = it->it_data;
171 struct ll_file_data *fd;
172 struct mds_body *body;
175 body = lustre_msg_buf (req->rq_repmsg, 1, sizeof (*body));
176 LASSERT (body != NULL); /* reply already checked out */
177 LASSERT_REPSWABBED (req, 1); /* and swabbed down */
179 LASSERT(!file->private_data);
181 OBD_SLAB_ALLOC(fd, ll_file_data_slab, SLAB_KERNEL, sizeof *fd);
182 /* We can't handle this well without reorganizing ll_file_open and
183 * ll_mdc_close, so don't even try right now. */
186 memset(fd, 0, sizeof(*fd));
188 memcpy(&fd->fd_mds_och.och_fh, &body->handle, sizeof(body->handle));
189 fd->fd_mds_och.och_req = it->it_data;
190 file->private_data = fd;
/* ll_osc_open(): open the file's OST objects with obd_open(), storing the
 * OST open handle in fd->fd_ost_och.  On success clears O_LOV_DELAY_CREATE
 * and copies size/time attributes from the returned obdo into the inode.
 * NOTE(review): obdo allocation/free and error lines are elided here. */
195 static int ll_osc_open(struct lustre_handle *conn, struct inode *inode,
196 struct file *file, struct lov_stripe_md *lsm)
198 struct ll_file_data *fd = file->private_data;
206 oa->o_id = lsm->lsm_object_id;
207 oa->o_mode = S_IFREG;
208 oa->o_valid = (OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLBLOCKS |
209 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
210 rc = obd_open(conn, oa, lsm, NULL, &fd->fd_ost_och);
214 file->f_flags &= ~O_LOV_DELAY_CREATE;
215 obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
216 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
224 /* Caller must hold lli_open_sem to protect lli->lli_smd from changing and
225 * duplicate objects from being created. We only install lsm to lli_smd if
226 * the mdc open was successful (hence stored stripe MD on MDS), otherwise
227 * other nodes could try to create different objects for the same file.
/* NOTE(review): this view is elided (oa allocation, iattr declaration,
 * labels, braces).  Visible flow: create OST objects, pack the stripe MD,
 * store it on the MDS via mdc_setattr(); on failure, the out_destroy path
 * below destroys the created objects so they are not leaked. */
229 static int ll_create_obj(struct lustre_handle *conn, struct inode *inode,
230 struct file *file, struct lov_stripe_md *lsm)
232 struct ptlrpc_request *req = NULL;
233 struct ll_inode_info *lli = ll_i2info(inode);
234 struct lov_mds_md *lmm = NULL;
237 struct mdc_op_data op_data;
238 int rc, err, lmm_size = 0;; /* XXX stray second semicolon */
245 oa->o_mode = S_IFREG | 0600;
246 oa->o_id = inode->i_ino;
247 /* Keep these 0 for now, because chown/chgrp does not change the
248 * ownership on the OST, and we don't want to allow BA OST NFS
249 * users to access these objects by mistake. */
252 oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLMODE |
253 OBD_MD_FLUID | OBD_MD_FLGID;
255 rc = obd_create(conn, oa, &lsm, NULL);
257 CERROR("error creating objects for inode %lu: rc = %d\n",
260 CERROR("obd_create returned invalid rc %d\n", rc);
265 obdo_to_inode(inode, oa, OBD_MD_FLBLKSZ);
267 LASSERT(lsm && lsm->lsm_object_id);
268 rc = obd_packmd(conn, &lmm, lsm);
270 GOTO(out_destroy, rc);
274 /* Save the stripe MD with this file on the MDS */
275 memset(&iattr, 0, sizeof(iattr));
276 iattr.ia_valid = ATTR_FROM_OPEN;
278 ll_prepare_mdc_op_data(&op_data, inode, NULL, NULL, 0, 0);
280 rc = mdc_setattr(&ll_i2sbi(inode)->ll_mdc_conn, &op_data,
281 &iattr, lmm, lmm_size, &req);
282 ptlrpc_req_finished(req);
284 obd_free_diskmd (conn, &lmm);
286 /* If we couldn't complete mdc_open() and store the stripe MD on the
287 * MDS, we need to destroy the objects now or they will be leaked.
290 CERROR("error: storing stripe MD for %lu: rc %d\n",
292 GOTO(out_destroy, rc);
295 lli->lli_maxbytes = lsm->lsm_maxbytes;
/* out_destroy path: undo the obd_create so objects are not leaked. */
303 obdo_from_inode(oa, inode, OBD_MD_FLTYPE);
304 oa->o_id = lsm->lsm_object_id;
305 oa->o_valid |= OBD_MD_FLID;
306 err = obd_destroy(conn, oa, lsm, NULL);
307 obd_free_memmd(conn, &lsm);
309 CERROR("error uncreating inode %lu objects: rc %d\n",
314 /* Open a file, and (for the very first open) create objects on the OSTs at
315 * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
316 * creation or open until ll_lov_setstripe() ioctl is called. We grab
317 * lli_open_sem to ensure no other process will create objects, send the
318 * stripe MD to the MDS, or try to destroy the objects if that fails.
320 * If we already have the stripe MD locally then we don't request it in
321 * mdc_open(), by passing a lmm_size = 0.
323 * It is up to the application to ensure no other processes open this file
324 * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
325 * used. We might be able to avoid races of that sort by getting lli_open_sem
326 * before returning in the O_LOV_DELAY_CREATE case and dropping it here
327 * or in ll_file_release(), but I'm not sure that is desirable/necessary.
329 extern int ll_it_open_error(int phase, struct lookup_intent *it);
/* ll_file_open(): ->open() entry point.  Consumes the lookup intent set
 * up by the dcache (LL_GET_INTENT), installs ll_file_data via
 * ll_local_open(), registers MDS open replay data, and for regular files
 * creates/opens the OST objects under lli_open_sem — unless the caller
 * passed O_LOV_DELAY_CREATE, which defers object creation to the
 * LL_IOC_LOV_SETSTRIPE ioctl (see header comment above).
 * NOTE(review): error paths, braces and RETURNs are elided in this view;
 * the final ll_mdc_close() line appears to be a failure-cleanup path. */
331 int ll_file_open(struct inode *inode, struct file *file)
333 struct ll_sb_info *sbi = ll_i2sbi(inode);
334 struct ll_inode_info *lli = ll_i2info(inode);
335 struct lustre_handle *conn = ll_i2obdconn(inode);
336 struct lookup_intent *it;
337 struct lov_stripe_md *lsm;
341 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
342 inode->i_generation, inode);
344 /* don't do anything for / */
345 if (inode->i_sb->s_root == file->f_dentry)
348 LL_GET_INTENT(file->f_dentry, it);
349 rc = ll_it_open_error(IT_OPEN_OPEN, it);
353 rc = ll_local_open(file, it);
357 mdc_set_open_replay_data(&((struct ll_file_data *)
358 file->private_data)->fd_mds_och);
359 if (!S_ISREG(inode->i_mode))
364 if (file->f_flags & O_LOV_DELAY_CREATE) {
365 CDEBUG(D_INODE, "delaying object creation\n");
368 down(&lli->lli_open_sem);
370 rc = ll_create_obj(conn, inode, file, NULL);
371 up(&lli->lli_open_sem);
375 CERROR("warning: stripe already set on ino %lu\n",
377 up(&lli->lli_open_sem);
382 rc = ll_osc_open(conn, inode, file, lsm);
388 ll_mdc_close(&sbi->ll_mdc_conn, inode, file);
393 * really does the getattr on the inode and updates its fields
/* NOTE(review): head of the comment and several statements (oa, fid, rc,
 * bef/aft declarations, braces) are elided in this view.  The visible
 * logic brackets the OST getattr with ll_farthest_dirty() snapshots so a
 * racing writeback cannot make us trust a stale size. */
395 int ll_inode_getattr(struct inode *inode, struct lov_stripe_md *lsm,
398 struct ll_sb_info *sbi = ll_i2sbi(inode);
399 struct ll_inode_info *lli = ll_i2info(inode);
400 struct ptlrpc_request_set *set;
403 unsigned long before, after;
411 memset(&oa, 0, sizeof oa);
412 oa.o_id = lsm->lsm_object_id;
414 oa.o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLSIZE |
415 OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
418 if (ostdata != NULL) {
419 memcpy(&oa.o_inline, ostdata, FD_OSTDATA_SIZE);
420 oa.o_valid |= OBD_MD_FLHANDLE;
423 /* getattr can race with writeback. we don't want to trust a getattr
424 * that doesn't include the writeback of our farthest cached pages
425 * that it raced with. */
427 bef = ll_farthest_dirty(&lli->lli_dirty, &before);
429 rc = obd_getattr(&sbi->ll_osc_conn, &oa, lsm);
431 set = ptlrpc_prep_set ();
433 CERROR ("ENOMEM allocing request set\n");
436 rc = obd_getattr_async(&sbi->ll_osc_conn, &oa, lsm, set);
438 rc = ptlrpc_set_wait (set);
439 ptlrpc_set_destroy (set);
445 aft = ll_farthest_dirty(&lli->lli_dirty, &after);
446 CDEBUG(D_INODE, " %d,%lu -> %d,%lu\n", bef, before, aft, after);
448 (aft != 0 || after < before) &&
449 oa.o_size < ((u64)before + 1) << PAGE_CACHE_SHIFT);
451 obdo_to_inode(inode, &oa, (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
452 OBD_MD_FLMTIME | OBD_MD_FLCTIME));
453 if (inode->i_blksize < PAGE_CACHE_SIZE)
454 inode->i_blksize = PAGE_CACHE_SIZE;
456 /* make sure getattr doesn't return a size that causes writeback
457 * to forget about cached writes */
458 if ((aft == 0) && oa.o_size < ((u64)after + 1) << PAGE_CACHE_SHIFT) {
459 CDEBUG(D_INODE, "cached at %lu, keeping %llu i_size instead "
460 "of oa "LPU64"\n", after, inode->i_size,
465 obdo_to_inode(inode, &oa, OBD_MD_FLSIZE);
467 CDEBUG(D_INODE, "objid "LPX64" size %Lu/%Lu blksize %lu\n",
468 lsm->lsm_object_id, inode->i_size, inode->i_size,
474 * some callers, notably truncate, really don't want i_size set based
475 * on the size returned by the getattr, or lock acquisition in
/* (comment head/tail elided)  Enqueue an LDLM extent lock without the
 * size-validation step that ll_extent_lock() adds; no-op when the fd or
 * superblock is flagged to ignore locking. */
478 int ll_extent_lock_no_validate(struct ll_file_data *fd, struct inode *inode,
479 struct lov_stripe_md *lsm,
480 int mode, struct ldlm_extent *extent,
481 struct lustre_handle *lockh)
483 struct ll_sb_info *sbi = ll_i2sbi(inode);
487 LASSERT(lockh->cookie == 0);
489 /* XXX phil: can we do this? won't it screw the file size up? */
490 if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
491 (sbi->ll_flags & LL_SBI_NOLCK))
494 CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
495 inode->i_ino, extent->start, extent->end);
/* NOTE(review): sizeof(extent) below is the size of the POINTER;
 * sizeof(*extent) looks intended — compare with obd_match() in
 * ll_extent_lock() which passes sizeof(size_lock).  Confirm against
 * obd_enqueue()'s expected cookie length before changing. */
497 rc = obd_enqueue(&sbi->ll_osc_conn, lsm, NULL, LDLM_EXTENT, extent,
498 sizeof(extent), mode, &flags, ll_lock_callback,
499 inode, sizeof(*inode), lockh);
505 * this grabs a lock and manually implements behaviour that makes it look like
506 * the OST is returning the file size with each lock acquisition.
/* (comment head elided)  Takes the extent lock, then — unless we already
 * hold a cached size lock (LLI_F_HAVE_SIZE_LOCK) — does a getattr to
 * refresh i_size and tries to obd_match() a PR lock covering [i_size,
 * EOF] so the cached size can be trusted until that lock is cancelled. */
508 int ll_extent_lock(struct ll_file_data *fd, struct inode *inode,
509 struct lov_stripe_md *lsm,
510 int mode, struct ldlm_extent *extent,
511 struct lustre_handle *lockh)
513 struct ll_inode_info *lli = ll_i2info(inode);
514 struct ldlm_extent size_lock;
515 struct lustre_handle match_lockh = {0};
516 int flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED;
520 rc = ll_extent_lock_no_validate(fd, inode, lsm, mode, extent, lockh);
524 if (test_bit(LLI_F_HAVE_SIZE_LOCK, &lli->lli_flags))
527 rc = ll_inode_getattr(inode, lsm, fd ? &fd->fd_ost_och : NULL);
/* getattr failed: drop the extent lock we just took. */
529 ll_extent_unlock(fd, inode, lsm, mode, lockh);
533 size_lock.start = inode->i_size;
534 size_lock.end = OBD_OBJECT_EOF;
536 /* XXX I bet we should be checking the lock ignore flags.. */
537 matched = obd_match(&ll_i2sbi(inode)->ll_osc_conn, lsm, LDLM_EXTENT,
538 &size_lock, sizeof(size_lock), LCK_PR, &flags,
541 /* hey, alright, we hold a size lock that covers the size we
542 * just found, its not going to change for a while.. */
544 set_bit(LLI_F_HAVE_SIZE_LOCK, &lli->lli_flags);
545 obd_cancel(&ll_i2sbi(inode)->ll_osc_conn, lsm, LCK_PR,
/* ll_extent_unlock(): release an extent lock taken by ll_extent_lock*();
 * mirrors their no-lock short-circuit for IGNORE_LOCK/NOLCK setups. */
552 int ll_extent_unlock(struct ll_file_data *fd, struct inode *inode,
553 struct lov_stripe_md *lsm, int mode,
554 struct lustre_handle *lockh)
556 struct ll_sb_info *sbi = ll_i2sbi(inode);
560 /* XXX phil: can we do this? won't it screw the file size up? */
561 if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
562 (sbi->ll_flags & LL_SBI_NOLCK))
565 rc = obd_cancel(&sbi->ll_osc_conn, lsm, mode, lockh);
/* Strip setuid/setgid on write by a caller without CAP_FSETID, mirroring
 * the kernel's remove_suid().  NOTE(review): `mode` declaration and
 * closing braces are elided in this view. */
570 static inline void ll_remove_suid(struct inode *inode)
574 /* set S_ISGID if S_IXGRP is set, and always set S_ISUID */
575 mode = (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) | S_ISUID;
577 /* were any of those set-id bits actually set on the inode? */
578 mode &= inode->i_mode;
579 if (mode && !capable(CAP_FSETID)) {
580 inode->i_mode &= ~mode;
581 // XXX careful here - we cannot change the size
/* Update the inode's atime unless it is unchanged or the inode is
 * read-only/noatime.  NOTE(review): both the ll_inode_setattr() path and
 * the plain i_atime assignment are visible; presumably they are separated
 * by elided preprocessor conditionals — confirm in the full source. */
586 static void ll_update_atime(struct inode *inode)
591 attr.ia_atime = LTIME_S(CURRENT_TIME);
592 attr.ia_valid = ATTR_ATIME;
594 if (inode->i_atime == attr.ia_atime) return;
595 if (IS_RDONLY(inode)) return;
596 if (IS_NOATIME(inode)) return;
598 /* ll_inode_setattr() sets inode->i_atime from attr.ia_atime */
599 ll_inode_setattr(inode, &attr, 0);
601 /* update atime, but don't explicitly write it out just this change */
602 inode->i_atime = CURRENT_TIME;
608 * flush the page cache for an extent as its canceled. when we're on an
609 * lov we get a lock cancelation for each of the obd locks under the lov
610 * so we have to map the obd's region back onto the stripes in the file
613 * no one can dirty the extent until we've finished our work and they
614 * can enqueue another lock.
616 * XXX this could be asking the inode's dirty tree for info
/* NOTE(review): many lines (locals, braces, break/continue statements)
 * are elided in this view.  Visible structure: map the cancelled OST
 * extent back to file page indices (asking the LOV for our stripe number
 * via obd_get_info "lock_to_stripe" when striped), write back dirty
 * pages under a PW lock, then drop all covered pages from the cache. */
618 void ll_pgcache_remove_extent(struct inode *inode, struct lov_stripe_md *lsm,
619 struct ldlm_lock *lock)
621 struct ldlm_extent *extent = &lock->l_extent;
622 unsigned long start, end, count, skip, i, j;
627 CDEBUG(D_INODE, "obdo %lu inode %p ["LPU64"->"LPU64"] size: %llu\n",
628 inode->i_ino, inode, extent->start, extent->end, inode->i_size);
630 start = extent->start >> PAGE_CACHE_SHIFT;
633 end = (extent->end >> PAGE_CACHE_SHIFT) + 1;
634 if ((end << PAGE_CACHE_SHIFT) < extent->end)
636 if (lsm->lsm_stripe_count > 1) {
639 struct ldlm_lock *lock;
640 struct lov_stripe_md *lsm;
641 } key = { .name = "lock_to_stripe", .lock = lock, .lsm = lsm };
643 __u32 vallen = sizeof(stripe);
646 /* get our offset in the lov */
647 rc = obd_get_info(ll_i2obdconn(inode), sizeof(key),
648 &key, &vallen, &stripe);
650 CERROR("obd_get_info: rc = %d\n", rc);
653 LASSERT(stripe < lsm->lsm_stripe_count);
/* Translate stripe-local page range to file-wide page indices. */
655 count = lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
656 skip = (lsm->lsm_stripe_count - 1) * count;
657 start += (start/count * skip) + (stripe * count);
659 end += (end/count * skip) + (stripe * count);
662 i = (inode->i_size + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
/* Losing a lock that may cover the size means the cached size lock
 * optimization in ll_extent_lock() is no longer valid. */
664 clear_bit(LLI_F_HAVE_SIZE_LOCK, &(ll_i2info(inode)->lli_flags));
668 CDEBUG(D_INODE, "start: %lu j: %lu count: %lu skip: %lu end: %lu\n",
669 start, start % count, count, skip, end);
671 /* start writeback on dirty pages in the extent when its PW */
672 for (i = start, j = start % count;
673 lock->l_granted_mode == LCK_PW && i < end; j++, i++) {
678 /* its unlikely, but give us a chance to bail when we're out */
679 PGCACHE_WRLOCK(inode->i_mapping);
680 if (list_empty(&inode->i_mapping->dirty_pages)) {
681 CDEBUG(D_INODE, "dirty list empty\n");
682 PGCACHE_WRUNLOCK(inode->i_mapping);
685 PGCACHE_WRUNLOCK(inode->i_mapping);
690 page = find_get_page(inode->i_mapping, i);
693 if (!PageDirty(page) || TryLockPage(page)) {
694 page_cache_release(page);
697 if (PageDirty(page)) {
698 CDEBUG(D_INODE, "writing page %p\n", page);
699 PGCACHE_WRLOCK(inode->i_mapping);
700 list_del(&page->list);
701 list_add(&page->list, &inode->i_mapping->locked_pages);
702 PGCACHE_WRUNLOCK(inode->i_mapping);
704 /* this writepage might write out pages outside
705 * this extent, but that's ok, the pages are only
706 * still dirty because a lock still covers them */
707 ClearPageDirty(page);
708 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
709 ret = inode->i_mapping->a_ops->writepage(page);
711 ret = inode->i_mapping->a_ops->writepage(page, NULL);
718 page_cache_release(page);
722 /* our locks are page granular thanks to osc_enqueue, we invalidate the
724 LASSERT((extent->start & ~PAGE_CACHE_MASK) == 0);
725 LASSERT(((extent->end+1) & ~PAGE_CACHE_MASK) == 0);
726 for (i = start, j = start % count ; i < end ; j++, i++) {
731 PGCACHE_WRLOCK(inode->i_mapping);
732 if (list_empty(&inode->i_mapping->dirty_pages) &&
733 list_empty(&inode->i_mapping->clean_pages) &&
734 list_empty(&inode->i_mapping->locked_pages)) {
735 CDEBUG(D_INODE, "nothing left\n");
736 PGCACHE_WRUNLOCK(inode->i_mapping);
739 PGCACHE_WRUNLOCK(inode->i_mapping);
742 page = find_get_page(inode->i_mapping, i);
745 CDEBUG(D_INODE, "dropping page %p at %lu\n", page, page->index);
747 if (page->mapping) /* might have raced */
748 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
749 truncate_complete_page(page);
751 truncate_complete_page(page->mapping, page);
754 page_cache_release(page);
/* LDLM callback for our extent locks.  BLOCKING: another client wants a
 * conflicting lock, so cancel ours.  CANCELING: the lock is going away,
 * so flush/drop the covered page cache via ll_pgcache_remove_extent().
 * NOTE(review): switch statement line, default case and RETURNs are
 * elided in this view. */
759 int ll_lock_callback(struct ldlm_lock *lock, struct ldlm_lock_desc *new,
760 void *data, int flag)
762 struct inode *inode = data;
763 struct ll_inode_info *lli = ll_i2info(inode);
764 struct lustre_handle lockh = { 0 };
768 LASSERT(inode != NULL);
771 case LDLM_CB_BLOCKING:
772 ldlm_lock2handle(lock, &lockh);
773 rc = ldlm_cli_cancel(&lockh);
775 CERROR("ldlm_cli_cancel failed: %d\n", rc);
777 case LDLM_CB_CANCELING:
778 /* FIXME: we could be given 'canceling intents' so that we
779 * could know to write-back or simply throw away the pages
780 * based on if the cancel comes from a desire to, say,
781 * read or truncate.. */
/* Cheap sanity checks against freed/poisoned pointers. */
782 LASSERT((unsigned long)inode > 0x1000);
783 LASSERT((unsigned long)lli > 0x1000);
784 LASSERT((unsigned long)lli->lli_smd > 0x1000);
785 ll_pgcache_remove_extent(inode, lli->lli_smd, lock);
/* ll_file_read(): ->read() handler.  Takes a PR extent lock from *ppos to
 * EOF (so extending writes on other nodes are flushed and visible after
 * the getattr done at lock time), registers the extent on
 * lli_read_extents so ll_readpage can see which pages are covered, then
 * delegates to generic_file_read().  NOTE(review): declarations of
 * retval/err and some braces/RETURNs are elided in this view. */
794 static ssize_t ll_file_read(struct file *filp, char *buf, size_t count,
797 struct ll_file_data *fd = filp->private_data;
798 struct inode *inode = filp->f_dentry->d_inode;
799 struct ll_inode_info *lli = ll_i2info(inode);
800 struct lov_stripe_md *lsm = lli->lli_smd;
801 struct lustre_handle lockh = { 0 };
802 struct ll_read_extent rextent;
806 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
807 inode->i_ino, inode->i_generation, inode, count, *ppos);
809 /* "If nbyte is 0, read() will return 0 and have no other results."
810 * -- Single Unix Spec */
814 /* grab a -> eof extent to push extending writes out of node's caches
815 * so we can see them at the getattr after lock acquisition. this will
816 * turn into a seperate [*ppos + count, EOF] 'size intent' lock attempt
818 rextent.re_extent.start = *ppos;
819 rextent.re_extent.end = OBD_OBJECT_EOF;
821 err = ll_extent_lock(fd, inode, lsm, LCK_PR, &rextent.re_extent,&lockh);
825 /* XXX tell ll_readpage what pages have a PR lock.. */
826 rextent.re_task = current;
827 spin_lock(&lli->lli_read_extent_lock);
828 list_add(&rextent.re_lli_item, &lli->lli_read_extents);
829 spin_unlock(&lli->lli_read_extent_lock);
831 CDEBUG(D_INFO, "Reading inode %lu, "LPSZ" bytes, offset %Ld\n",
832 inode->i_ino, count, *ppos);
833 retval = generic_file_read(filp, buf, count, ppos);
/* Unregister the read extent and drop the lock. */
835 spin_lock(&lli->lli_read_extent_lock);
836 list_del(&rextent.re_lli_item);
837 spin_unlock(&lli->lli_read_extent_lock);
840 ll_extent_unlock(fd, inode, lsm, LCK_PR, &lockh);
845 * Write to a file (through the page cache).
/* ll_file_write(): takes a PW extent lock covering the write range (to
 * EOF for O_APPEND), skipping the size validation for fully page-aligned
 * non-append writes, enforces the maxbytes limit (SIGXFSZ/-EFBIG as per
 * POSIX), then delegates to generic_file_write().
 * NOTE(review): retval/err declarations, braces and RETURNs are elided
 * in this view. */
848 ll_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
850 struct ll_file_data *fd = file->private_data;
851 struct inode *inode = file->f_dentry->d_inode;
852 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
853 struct lustre_handle lockh = { 0 };
854 struct ldlm_extent extent;
855 loff_t maxbytes = ll_file_maxbytes(inode);
858 char should_validate = 1;
860 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
861 inode->i_ino, inode->i_generation, inode, count, *ppos);
864 * sleep doing some writeback work of this mount's dirty data
865 * if the VM thinks we're low on memory.. other dirtying code
866 * paths should think about doing this, too, but they should be
867 * careful not to hold locked pages while they do so. like
868 * ll_prepare_write. *cough*
870 LL_CHECK_DIRTY(inode->i_sb);
872 /* POSIX, but surprised the VFS doesn't check this already */
876 if (file->f_flags & O_APPEND) {
878 extent.end = OBD_OBJECT_EOF;
880 extent.start = *ppos;
881 extent.end = *ppos + count - 1;
882 /* we really don't care what i_size is if we're doing
883 * fully page aligned writes */
884 if ((*ppos & ~PAGE_CACHE_MASK) == 0 &&
885 (count & ~PAGE_CACHE_MASK) == 0)
890 err = ll_extent_lock(fd, inode, lsm, LCK_PW, &extent, &lockh);
892 err = ll_extent_lock_no_validate(fd, inode, lsm, LCK_PW,
897 /* this is ok, g_f_w will overwrite this under i_sem if it races
898 * with a local truncate, it just makes our maxbyte checking easier */
899 if (file->f_flags & O_APPEND)
900 *ppos = inode->i_size;
902 if (*ppos >= maxbytes) {
903 if (count || *ppos > maxbytes) {
904 send_sig(SIGXFSZ, current, 0);
905 GOTO(out, retval = -EFBIG);
908 if (*ppos + count > maxbytes)
909 count = maxbytes - *ppos;
911 CDEBUG(D_INFO, "Writing inode %lu, "LPSZ" bytes, offset %Lu\n",
912 inode->i_ino, count, *ppos);
914 /* generic_file_write handles O_APPEND after getting i_sem */
915 retval = generic_file_write(file, buf, count, ppos);
919 ll_extent_unlock(fd, inode, lsm, LCK_PW, &lockh);
/* LL_IOC_LOV_SETSTRIPE handler: under lli_open_sem, unpack the
 * user-supplied striping into an lsm, create the OST objects, then open
 * them.  If a stripe already exists (e.g. another opener raced us) just
 * complete the delayed open with the existing lsm.  NOTE(review): rc
 * declaration, braces, RETURN paths and the lli_smd install are elided
 * in this view. */
923 static int ll_lov_setstripe(struct inode *inode, struct file *file,
926 struct ll_inode_info *lli = ll_i2info(inode);
927 struct lustre_handle *conn = ll_i2obdconn(inode);
928 struct lov_stripe_md *lsm;
932 down(&lli->lli_open_sem);
935 up(&lli->lli_open_sem);
936 CERROR("stripe already exists for ino %lu\n", inode->i_ino);
937 /* If we haven't already done the open, do so now */
938 if (file->f_flags & O_LOV_DELAY_CREATE) {
939 int rc2 = ll_osc_open(conn, inode, file, lsm);
947 rc = obd_iocontrol(LL_IOC_LOV_SETSTRIPE, conn, 0, &lsm, (void *)arg);
949 up(&lli->lli_open_sem);
952 rc = ll_create_obj(conn, inode, file, lsm);
953 up(&lli->lli_open_sem);
956 obd_free_memmd(conn, &lsm);
959 rc = ll_osc_open(conn, inode, file, lli->lli_smd);
/* LL_IOC_LOV_GETSTRIPE handler: copy the file's striping info out to the
 * user via the LOV's iocontrol.  NOTE(review): the !lsm check between the
 * declarations and the return is elided in this view. */
963 static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
965 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
966 struct lustre_handle *conn = ll_i2obdconn(inode);
971 return obd_iocontrol(LL_IOC_LOV_GETSTRIPE, conn, 0, lsm, (void *)arg);
/* ->ioctl() handler: file-flag get/set/clear, stripe set/get, and a
 * pass-through of unrecognized commands to obd_iocontrol().  Tty ioctls
 * are rejected up front.  NOTE(review): `flags` declaration, the switch
 * line, returns and braces are elided in this view. */
974 int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
977 struct ll_file_data *fd = file->private_data;
978 struct lustre_handle *conn;
980 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%u\n", inode->i_ino,
981 inode->i_generation, inode, cmd);
983 if ((cmd & 0xffffff00) == ((int)'T') << 8) /* tty ioctls */
987 case LL_IOC_GETFLAGS:
988 /* Get the current value of the file flags */
989 return put_user(fd->fd_flags, (int *)arg);
990 case LL_IOC_SETFLAGS:
991 case LL_IOC_CLRFLAGS:
992 /* Set or clear specific file flags */
993 /* XXX This probably needs checks to ensure the flags are
994 * not abused, and to handle any flag side effects.
996 if (get_user(flags, (int *) arg))
999 if (cmd == LL_IOC_SETFLAGS)
1000 fd->fd_flags |= flags;
1002 fd->fd_flags &= ~flags;
1004 case LL_IOC_LOV_SETSTRIPE:
1005 return ll_lov_setstripe(inode, file, arg);
1006 case LL_IOC_LOV_GETSTRIPE:
1007 return ll_lov_getstripe(inode, arg);
1009 /* We need to special case any other ioctls we want to handle,
1010 * to send them to the MDS/OST as appropriate and to properly
1011 * network encode the arg field.
1012 case EXT2_IOC_GETFLAGS:
1013 case EXT2_IOC_SETFLAGS:
1014 case EXT2_IOC_GETVERSION_OLD:
1015 case EXT2_IOC_GETVERSION_NEW:
1016 case EXT2_IOC_SETVERSION_OLD:
1017 case EXT2_IOC_SETVERSION_NEW:
1020 conn = ll_i2obdconn(inode);
1021 return obd_iocontrol(cmd, conn, 0, NULL, (void *)arg);
/* ->llseek() handler.  SEEK_END must consult the real file size, so it
 * takes a [0, EOF] PR extent lock (which refreshes i_size via
 * ll_extent_lock) before computing the offset; SEEK_CUR/SEEK_SET are
 * local.  NOTE(review): retval/err declarations, braces and the
 * lock-drop condition are elided in this view. */
1025 loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
1027 struct inode *inode = file->f_dentry->d_inode;
1028 struct ll_file_data *fd = file->private_data;
1029 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1030 struct lustre_handle lockh = {0};
1033 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),to=%llu\n", inode->i_ino,
1034 inode->i_generation, inode,
1035 offset + ((origin==2) ? inode->i_size : file->f_pos));
1037 if (origin == 2) { /* SEEK_END */
1039 struct ldlm_extent extent = {0, OBD_OBJECT_EOF};
1040 err = ll_extent_lock(fd, inode, lsm, LCK_PR, &extent, &lockh);
1041 if (err != ELDLM_OK)
1044 offset += inode->i_size;
1045 } else if (origin == 1) { /* SEEK_CUR */
1046 offset += file->f_pos;
1050 if (offset >= 0 && offset <= ll_file_maxbytes(inode)) {
1051 if (offset != file->f_pos) {
1052 file->f_pos = offset;
1053 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1055 file->f_version = ++event;
1062 ll_extent_unlock(fd, inode, lsm, LCK_PR, &lockh);
/* ->fsync() handler: push dirty pages and wait for completion.
 * NOTE(review): the `data` (datasync) flag is unused in the visible
 * lines; error aggregation between the two calls is elided. */
1066 int ll_fsync(struct file *file, struct dentry *dentry, int data)
1069 struct inode *inode = dentry->d_inode;
1071 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1072 inode->i_generation, inode);
1075 * filemap_fdata{sync,wait} are also called at PW lock cancelation so
1076 * we know that they can only find data to writeback here if we are
1077 * still holding the PW lock that covered the dirty pages. XXX we
1078 * should probably get a reference on it, though, just to be clear.
1080 ret = filemap_fdatasync(dentry->d_inode->i_mapping);
1082 ret = filemap_fdatawait(dentry->d_inode->i_mapping);
/* Revalidate a dentry's inode attributes.  If we hold no MD lock, do an
 * mdc_getattr (requesting EA data for regular files), sanity-check the
 * reply, unpack any stripe MD, and refresh the inode.  Then, for files
 * with allocated objects, take and drop a [0, EOF] PR extent lock so
 * i_size is refreshed — stat comes in through this path and callers
 * expect an accurate size.  NOTE(review): many lines (fid declaration,
 * braces, RETURNs) are elided in this view. */
1087 int ll_inode_revalidate(struct dentry *dentry)
1089 struct inode *inode = dentry->d_inode;
1090 struct lov_stripe_md *lsm = NULL;
1094 CERROR("REPORT THIS LINE TO PETER\n");
1097 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s\n",
1098 inode->i_ino, inode->i_generation, inode, dentry->d_name.name);
1100 /* this is very tricky. it is unsafe to call ll_have_md_lock
1101 when we have a referenced lock: because it may cause an RPC
1102 below when the lock is marked CB_PENDING. That RPC may not
1103 go out because someone else may be in another RPC waiting for
1105 if (!(dentry->d_it && dentry->d_it->it_lock_mode) &&
1106 !ll_have_md_lock(dentry)) {
1107 struct ptlrpc_request *req = NULL;
1108 struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
1110 struct mds_body *body;
1111 struct lov_mds_md *lmm;
1112 unsigned long valid = 0;
1113 int eadatalen = 0, rc;
1115 /* Why don't we update all valid MDS fields here, if we're
1116 * doing an RPC anyways? -phil */
1117 if (S_ISREG(inode->i_mode)) {
1118 eadatalen = obd_size_diskmd(&sbi->ll_osc_conn, NULL);
1119 valid |= OBD_MD_FLEASIZE;
1121 ll_inode2fid(&fid, inode);
1122 rc = mdc_getattr(&sbi->ll_mdc_conn, &fid,
1123 valid, eadatalen, &req);
1125 CERROR("failure %d inode %lu\n", rc, inode->i_ino);
1129 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
1130 LASSERT (body != NULL); /* checked by mdc_getattr() */
1131 LASSERT_REPSWABBED (req, 0); /* swabbed by mdc_getattr() */
/* File size for regular files is owned by the OSTs, not the MDS;
 * discard any size the MDS mistakenly sent. */
1133 if (S_ISREG(inode->i_mode) &&
1134 (body->valid & (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS))) {
1135 CERROR("MDS sent back size for regular file\n");
1136 body->valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
1139 /* XXX Too paranoid? */
1140 if ((body->valid ^ valid) & OBD_MD_FLEASIZE)
1141 CERROR("Asked for %s eadata but got %s\n",
1142 (valid & OBD_MD_FLEASIZE) ? "some" : "no",
1143 (body->valid & OBD_MD_FLEASIZE) ? "some":"none");
1145 if (S_ISREG(inode->i_mode) &&
1146 (body->valid & OBD_MD_FLEASIZE)) {
1147 if (body->eadatasize == 0) { /* no EA data */
1148 CERROR("OBD_MD_FLEASIZE set but no data\n");
1151 /* Only bother with this if inode's lsm not set? */
1152 lmm = lustre_msg_buf(req->rq_repmsg,1,body->eadatasize);
1153 LASSERT(lmm != NULL); /* mdc_getattr() checked */
1154 LASSERT_REPSWABBED(req, 1); /* mdc_getattr() swabbed */
1156 rc = obd_unpackmd (&sbi->ll_osc_conn,
1157 &lsm, lmm, body->eadatasize);
1159 CERROR("Error %d unpacking eadata\n", rc);
1160 ptlrpc_req_finished(req);
1163 LASSERT(rc >= sizeof (*lsm));
1166 ll_update_inode(inode, body, lsm);
/* ll_update_inode keeps lsm only if it adopted it; free otherwise. */
1167 if (lsm != NULL && ll_i2info(inode)->lli_smd != lsm)
1168 obd_free_memmd(&sbi->ll_osc_conn, &lsm);
1170 ptlrpc_req_finished(req);
1173 lsm = ll_i2info(inode)->lli_smd;
1174 if (!lsm) /* object not yet allocated, don't validate size */
1178 * unfortunately stat comes in through revalidate and we don't
1179 * differentiate this use from initial instantiation. we're
1180 * also being wildly conservative and flushing write caches
1181 * so that stat really returns the proper size.
1184 struct ldlm_extent extent = {0, OBD_OBJECT_EOF};
1185 struct lustre_handle lockh = {0};
1188 err = ll_extent_lock(NULL, inode, lsm, LCK_PR, &extent, &lockh);
1189 if (err != ELDLM_OK)
1192 ll_extent_unlock(NULL, inode, lsm, LCK_PR, &lockh);
1197 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
/* 2.5+ ->getattr(): revalidate the inode, then copy its fields into the
 * kstat.  NOTE(review): the `struct kstat *stat` parameter line and the
 * error check on `res` are elided in this view. */
1198 static int ll_getattr(struct vfsmount *mnt, struct dentry *de,
1202 struct inode *inode = de->d_inode;
1204 res = ll_inode_revalidate(de);
1207 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1208 stat->dev = inode->i_dev;
1210 stat->ino = inode->i_ino;
1211 stat->mode = inode->i_mode;
1212 stat->nlink = inode->i_nlink;
1213 stat->uid = inode->i_uid;
1214 stat->gid = inode->i_gid;
1215 stat->rdev = kdev_t_to_nr(inode->i_rdev);
1216 stat->atime = inode->i_atime;
1217 stat->mtime = inode->i_mtime;
1218 stat->ctime = inode->i_ctime;
1219 stat->size = inode->i_size;
/* File operations for regular Lustre files.  NOTE(review): the `read`
 * member line and closing `};` are elided in this view. */
1224 struct file_operations ll_file_operations = {
1226 write: ll_file_write,
1227 ioctl: ll_file_ioctl,
1229 release: ll_file_release,
1230 mmap: generic_file_mmap,
1231 llseek: ll_file_seek,
/* Inode operations for regular files: getattr on 2.5+, revalidate on
 * 2.4.  NOTE(review): #else/#endif and closing `};` are elided here. */
1235 struct inode_operations ll_file_inode_operations = {
1236 setattr_raw: ll_setattr_raw,
1237 setattr: ll_setattr,
1238 truncate: ll_truncate,
1239 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1240 getattr: ll_getattr,
1242 revalidate: ll_inode_revalidate,
1246 struct inode_operations ll_special_inode_operations = {
1247 setattr_raw: ll_setattr_raw,
1248 setattr: ll_setattr,
1249 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1250 getattr: ll_getattr,
1252 revalidate: ll_inode_revalidate,