1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
7 * Author: Andreas Dilger <adilger@clusterfs.com>
9 * This file is part of Lustre, http://www.lustre.org.
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #define DEBUG_SUBSYSTEM S_LLITE
26 #include <linux/lustre_dlm.h>
27 #include <linux/lustre_lite.h>
28 #include <linux/pagemap.h>
29 #include <linux/file.h>
30 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
31 #include <linux/lustre_compat25.h>
33 #include "llite_internal.h"
34 #include <linux/obd_lov.h>
/*
 * Close the MDS open handle associated with this open file and free the
 * per-open ll_file_data.  Drops a group extent lock first if one is held.
 * NOTE(review): this listing has line-number gaps; braces, error paths and
 * the RETURN are elided here — verify against the full source.
 */
36 int ll_mdc_close(struct obd_export *lmv_exp, struct inode *inode,
39 struct ll_file_data *fd = file->private_data;
40 struct ptlrpc_request *req = NULL;
41 struct obd_client_handle *och = &fd->fd_mds_och;
42 struct obdo *obdo = NULL;
46 /* clear group lock, if present */
47 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
48 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
49 fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
50 rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP,
/* Pack the inode's identity and current attributes into the obdo sent
 * with the close RPC. */
58 obdo->o_id = inode->i_ino;
59 obdo->o_valid = OBD_MD_FLID;
60 obdo_from_inode(obdo, inode, (OBD_MD_FLTYPE | OBD_MD_FLMODE |
61 OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
62 OBD_MD_FLATIME | OBD_MD_FLMTIME |
/* Dirty-inode flagging is deliberately disabled (dead branch). */
64 if (0 /* ll_is_inode_dirty(inode) */) {
65 obdo->o_flags = MDS_BFLAG_UNCOMMITTED_WRITES;
66 obdo->o_valid |= OBD_MD_FLFLAGS;
68 obdo->o_mds = id_group(&ll_i2info(inode)->lli_id);
69 rc = md_close(lmv_exp, obdo, och, &req);
73 /* We are the last writer, so the MDS has instructed us to get
74 * the file size and any write cookies, then close again. */
75 //ll_queue_done_writing(inode);
78 CERROR("inode %lu mdc close failed: rc = %d\n",
/* Destroy OST objects if the MDS handed back unlink cookies. */
82 rc = ll_objects_destroy(req, file->f_dentry->d_inode, 1);
84 CERROR("inode %lu ll_objects destroy: rc = %d\n",
/* Tear down replay state, poison the handle, and free the fd. */
88 mdc_clear_open_replay_data(lmv_exp, och);
89 ptlrpc_req_finished(req);
90 och->och_fh.cookie = DEAD_HANDLE_MAGIC;
91 file->private_data = NULL;
92 OBD_SLAB_FREE(fd, ll_file_data_slab, sizeof(*fd));
96 /* While this returns an error code, fput() the caller does not, so we need
97 * to make every effort to clean up all of our state here. Also, applications
98 * rarely check close errors and even if an error is returned they will not
99 * re-try the close call.
/* VFS ->release hook: bump stats and close the MDS handle via ll_mdc_close.
 * NOTE(review): interior lines elided in this listing. */
101 int ll_file_release(struct inode *inode, struct file *file)
103 struct ll_file_data *fd;
104 struct ll_sb_info *sbi = ll_i2sbi(inode);
108 CDEBUG(D_VFSTRACE, "VFS Op:inode="DLID4"(%p)\n",
109 OLID4(&ll_i2info(inode)->lli_id), inode);
111 /* don't do anything for / */
112 if (inode->i_sb->s_root == file->f_dentry)
115 lprocfs_counter_incr(sbi->ll_stats, LPROC_LL_RELEASE);
116 fd = (struct ll_file_data *)file->private_data;
119 rc = ll_mdc_close(sbi->ll_lmv_exp, inode, file);
/*
 * Perform an intent-based open against the MDS: enqueue an IBITS lock with
 * an IT_OPEN intent on the file's parent/name pair.  On success the granted
 * lock handle is saved into the intent for the caller.
 */
123 static int ll_intent_file_open(struct file *file, void *lmm,
124 int lmmsize, struct lookup_intent *itp)
126 struct ll_sb_info *sbi = ll_i2sbi(file->f_dentry->d_inode);
127 struct dentry *parent = file->f_dentry->d_parent;
128 const char *name = file->f_dentry->d_name.name;
129 const int len = file->f_dentry->d_name.len;
130 struct lustre_handle lockh;
131 struct mdc_op_data data;
/* NOTE(review): O_RDWR is passed as the mode regardless of f_flags here —
 * presumably intentional for the open intent; confirm against callers. */
137 ll_prepare_mdc_data(&data, parent->d_inode, NULL, name, len, O_RDWR);
139 rc = md_enqueue(sbi->ll_lmv_exp, LDLM_IBITS, itp, LCK_PR, &data,
140 &lockh, lmm, lmmsize, ldlm_completion_ast,
141 ll_mdc_blocking_ast, NULL);
143 if (itp->d.lustre.it_lock_mode)
144 memcpy(&itp->d.lustre.it_lock_handle,
145 &lockh, sizeof(lockh));
147 CERROR("lock enqueue: err: %d\n", rc);
/*
 * Finish an open on the client side: allocate the per-open ll_file_data,
 * record the MDS file handle from the intent reply, initialize readahead
 * state, and register the open for replay.
 */
153 int ll_local_open(struct file *file, struct lookup_intent *it)
155 struct ptlrpc_request *req = it->d.lustre.it_data;
156 struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
157 struct obd_export *lmv_exp = ll_i2lmvexp(file->f_dentry->d_inode);
158 struct ll_file_data *fd;
159 struct mds_body *body;
/* The reply buffer was validated/swabbed by the intent machinery already. */
162 body = lustre_msg_buf (req->rq_repmsg, 1, sizeof (*body));
163 LASSERT (body != NULL); /* reply already checked out */
164 LASSERT_REPSWABBED (req, 1); /* and swabbed down */
166 LASSERTF(file->private_data == NULL, "file %*s/%*s ino %lu/%u (%o)\n",
167 file->f_dentry->d_name.len, file->f_dentry->d_name.name,
168 file->f_dentry->d_parent->d_name.len,
169 file->f_dentry->d_parent->d_name.name,
170 file->f_dentry->d_inode->i_ino,
171 file->f_dentry->d_inode->i_generation,
172 file->f_dentry->d_inode->i_mode);
175 OBD_SLAB_ALLOC(fd, ll_file_data_slab, SLAB_KERNEL, sizeof *fd);
177 /* We can't handle this well without reorganizing ll_file_open and
178 * ll_mdc_close, so don't even try right now. */
/* Copy the MDS open handle out of the reply into our client handle. */
181 memcpy(&fd->fd_mds_och.och_fh, &body->handle, sizeof(body->handle));
182 fd->fd_mds_och.och_magic = OBD_CLIENT_HANDLE_MAGIC;
183 file->private_data = fd;
184 ll_readahead_init(file->f_dentry->d_inode, &fd->fd_ras);
186 lli->lli_io_epoch = body->io_epoch;
188 mdc_set_open_replay_data(lmv_exp, &fd->fd_mds_och, it->d.lustre.it_data);
193 /* Open a file, and (for the very first open) create objects on the OSTs at
194 * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
195 * creation or open until ll_lov_setstripe() ioctl is called. We grab
196 * lli_open_sem to ensure no other process will create objects, send the
197 * stripe MD to the MDS, or try to destroy the objects if that fails.
199 * If we already have the stripe MD locally then we don't request it in
200 * mdc_open(), by passing a lmm_size = 0.
202 * It is up to the application to ensure no other processes open this file
203 * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
204 * used. We might be able to avoid races of that sort by getting lli_open_sem
205 * before returning in the O_LOV_DELAY_CREATE case and dropping it here
206 * or in ll_file_release(), but I'm not sure that is desirable/necessary.
/* NOTE(review): interior lines elided in this listing (error handling,
 * RETURN paths, lsm assignment). */
208 int ll_file_open(struct inode *inode, struct file *file)
210 struct ll_inode_info *lli = ll_i2info(inode);
211 struct lookup_intent *it, oit = { .it_op = IT_OPEN,
212 .it_flags = file->f_flags };
213 struct lov_stripe_md *lsm;
214 struct ptlrpc_request *req;
218 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino,
219 inode->i_generation, inode, file->f_flags);
221 /* don't do anything for / */
222 if (inode->i_sb->s_root == file->f_dentry)
/* No disposition in the intent means no open was performed yet; do a
 * fresh intent open here. */
227 if (!it || !it->d.lustre.it_disposition) {
229 rc = ll_intent_file_open(file, NULL, 0, it);
234 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_OPEN);
235 /* mdc_intent_lock() didn't get a request ref if there was an open
236 * error, so don't do cleanup on the request here (bug 3430) */
237 rc = it_open_error(DISP_OPEN_OPEN, it);
241 rc = ll_local_open(file, it);
243 LASSERTF(rc == 0, "rc = %d\n", rc);
245 if (!S_ISREG(inode->i_mode))
/* Delay OST object creation for O_LOV_DELAY_CREATE or read-only opens. */
250 if (file->f_flags & O_LOV_DELAY_CREATE ||
251 !(file->f_mode & FMODE_WRITE)) {
252 CDEBUG(D_INODE, "object creation was delayed\n");
256 file->f_flags &= ~O_LOV_DELAY_CREATE;
259 req = it->d.lustre.it_data;
260 ptlrpc_req_finished(req);
262 ll_open_complete(inode);
266 /* Fills the obdo with the attributes for the inode defined by lsm */
267 int ll_lsm_getattr(struct obd_export *exp, struct lov_stripe_md *lsm,
270 struct ptlrpc_request_set *set;
274 LASSERT(lsm != NULL);
/* Seed the obdo with the object identity and the set of attributes we
 * want back from the OSTs. */
276 memset(oa, 0, sizeof *oa);
277 oa->o_id = lsm->lsm_object_id;
278 oa->o_gr = lsm->lsm_object_gr;
279 oa->o_mode = S_IFREG;
280 oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLSIZE |
281 OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
282 OBD_MD_FLCTIME | OBD_MD_FLGROUP;
/* Issue the getattr to all stripes in parallel and wait for the set. */
284 set = ptlrpc_prep_set();
288 rc = obd_getattr_async(exp, oa, lsm, set);
290 rc = ptlrpc_set_wait(set);
291 ptlrpc_set_destroy(set);
/* Only keep the attributes the caller can trust from the merged reply. */
296 oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
297 OBD_MD_FLCTIME | OBD_MD_FLSIZE);
/* Strip setuid (and setgid when group-executable) bits from the inode mode,
 * unless the caller has CAP_FSETID — standard security behavior on write. */
301 static inline void ll_remove_suid(struct inode *inode)
305 /* set S_IGID if S_IXGRP is set, and always set S_ISUID */
306 mode = (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) | S_ISUID;
308 /* was any of the uid bits set? */
309 mode &= inode->i_mode;
310 if (mode && !capable(CAP_FSETID)) {
311 inode->i_mode &= ~mode;
312 // XXX careful here - we cannot change the size
/*
 * Map a granted DLM extent lock back to the stripe index within the file's
 * LOV stripe layout.  Returns the stripe number, or -ELDLM_NO_LOCK_DATA if
 * the lock's resource does not match any of this inode's objects.
 */
316 static int ll_lock_to_stripe_offset(struct inode *inode, struct ldlm_lock *lock)
318 struct ll_inode_info *lli = ll_i2info(inode);
319 struct lov_stripe_md *lsm = lli->lli_smd;
320 struct obd_export *exp = ll_i2obdexp(inode);
/* Keyed query the LOV answers with the stripe index for this lock. */
323 struct ldlm_lock *lock;
324 struct lov_stripe_md *lsm;
325 } key = { .name = "lock_to_stripe", .lock = lock, .lsm = lsm };
326 __u32 stripe, vallen = sizeof(stripe);
/* Single-stripe files trivially map to stripe 0. */
330 if (lsm->lsm_stripe_count == 1)
331 GOTO(check, stripe = 0);
333 /* get our offset in the lov */
334 rc = obd_get_info(exp, sizeof(key), &key, &vallen, &stripe);
336 CERROR("obd_get_info: rc = %d\n", rc);
339 LASSERT(stripe < lsm->lsm_stripe_count);
/* Sanity check: the lock's resource name must match the stripe object. */
342 if (lsm->lsm_oinfo[stripe].loi_id != lock->l_resource->lr_name.name[0]||
343 lsm->lsm_oinfo[stripe].loi_gr != lock->l_resource->lr_name.name[2]){
344 LDLM_ERROR(lock, "resource doesn't match object "LPU64"/"LPU64
345 " inode=%lu/%u (%p)\n",
346 lsm->lsm_oinfo[stripe].loi_id,
347 lsm->lsm_oinfo[stripe].loi_gr,
348 inode->i_ino, inode->i_generation, inode);
349 return -ELDLM_NO_LOCK_DATA;
355 /* Flush the page cache for an extent as its canceled. When we're on an LOV,
356 * we get a lock cancellation for each stripe, so we have to map the obd's
357 * region back onto the stripes in the file that it held.
359 * No one can dirty the extent until we've finished our work and they can
360 * enqueue another lock. The DLM protects us from ll_file_read/write here,
361 * but other kernel actors could have pages locked.
363 * Called with the DLM lock held. */
/* NOTE(review): heavily elided listing — several statements (page lock/
 * unlock, continue/break paths) are missing here; do not modify without
 * the full source. */
364 void ll_pgcache_remove_extent(struct inode *inode, struct lov_stripe_md *lsm,
365 struct ldlm_lock *lock, __u32 stripe)
367 ldlm_policy_data_t tmpex;
368 unsigned long start, end, count, skip, i, j;
370 int rc, rc2, discard = lock->l_flags & LDLM_FL_DISCARD_DATA;
371 struct lustre_handle lockh;
374 memcpy(&tmpex, &lock->l_policy_data, sizeof(tmpex));
375 CDEBUG(D_INODE|D_PAGE, "inode %lu(%p) ["LPU64"->"LPU64"] size: %llu\n",
376 inode->i_ino, inode, tmpex.l_extent.start, tmpex.l_extent.end,
379 /* our locks are page granular thanks to osc_enqueue, we invalidate the
381 LASSERT((tmpex.l_extent.start & ~PAGE_CACHE_MASK) == 0);
382 LASSERT(((tmpex.l_extent.end + 1) & ~PAGE_CACHE_MASK) == 0);
/* Translate the per-stripe extent into file page indices, accounting for
 * the round-robin stripe layout when there is more than one stripe. */
386 start = tmpex.l_extent.start >> PAGE_CACHE_SHIFT;
387 end = tmpex.l_extent.end >> PAGE_CACHE_SHIFT;
388 if (lsm->lsm_stripe_count > 1) {
389 count = lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
390 skip = (lsm->lsm_stripe_count - 1) * count;
391 start += start/count * skip + stripe * count;
393 end += end/count * skip + stripe * count;
395 if (end < tmpex.l_extent.end >> PAGE_CACHE_SHIFT)
398 i = inode->i_size ? (inode->i_size - 1) >> PAGE_CACHE_SHIFT : 0;
402 CDEBUG(D_INODE|D_PAGE, "walking page indices start: %lu j: %lu "
403 "count: %lu skip: %lu end: %lu%s\n", start, start % count,
404 count, skip, end, discard ? " (DISCARDING)" : "");
406 /* walk through the vmas on the inode and tear down mmaped pages that
407 * intersect with the lock. this stops immediately if there are no
408 * mmap()ed regions of the file. This is not efficient at all and
409 * should be short lived. We'll associate mmap()ed pages with the lock
410 * and will be able to find them directly */
412 for (i = start; i <= end; i += (j + skip)) {
413 j = min(count - (i % count), end - i + 1);
414 LASSERT(inode->i_mapping);
415 if (ll_teardown_mmaps(inode->i_mapping, i << PAGE_CACHE_SHIFT,
416 ((i+j) << PAGE_CACHE_SHIFT) - 1) )
420 /* this is the simplistic implementation of page eviction at
421 * cancelation. It is careful to get races with other page
422 * lockers handled correctly. fixes from bug 20 will make it
423 * more efficient by associating locks with pages and with
424 * batching writeback under the lock explicitly. */
425 for (i = start, j = start % count; i <= end;
426 j++, i++, tmpex.l_extent.start += PAGE_CACHE_SIZE) {
428 CDEBUG(D_PAGE, "skip index %lu to %lu\n", i, i + skip);
434 LASSERTF(tmpex.l_extent.start< lock->l_policy_data.l_extent.end,
435 LPU64" >= "LPU64" start %lu i %lu end %lu\n",
436 tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
439 if (!mapping_has_pages(inode->i_mapping)) {
440 CDEBUG(D_INODE|D_PAGE, "nothing left\n");
446 page = find_get_page(inode->i_mapping, i);
449 LL_CDEBUG_PAGE(D_PAGE, page, "lock page idx %lu ext "LPU64"\n",
450 i, tmpex.l_extent.start);
453 /* page->mapping to check with racing against teardown */
454 if (!discard && clear_page_dirty_for_io(page)) {
455 rc = ll_call_writepage(inode, page);
457 CERROR("writepage of page %p failed: %d\n",
459 /* either waiting for io to complete or reacquiring
460 * the lock that the failed writepage released */
464 tmpex.l_extent.end = tmpex.l_extent.start + PAGE_CACHE_SIZE - 1;
465 /* check to see if another DLM lock covers this page */
466 rc2 = ldlm_lock_match(lock->l_resource->lr_namespace,
467 LDLM_FL_BLOCK_GRANTED|LDLM_FL_CBPENDING |
469 &lock->l_resource->lr_name, LDLM_EXTENT,
470 &tmpex, LCK_PR | LCK_PW, &lockh);
471 if (rc2 == 0 && page->mapping != NULL) {
472 // checking again to account for writeback's lock_page()
473 LL_CDEBUG_PAGE(D_PAGE, page, "truncating\n");
474 ll_ra_accounting(page, inode->i_mapping);
475 ll_truncate_complete_page(page);
478 page_cache_release(page);
480 LASSERTF(tmpex.l_extent.start <=
481 (lock->l_policy_data.l_extent.end == ~0ULL ? ~0ULL :
482 lock->l_policy_data.l_extent.end + 1),
483 "loop too long "LPU64" > "LPU64" start %lu i %lu end %lu\n",
484 tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
/*
 * DLM blocking/cancel AST for extent locks.  On BLOCKING, cancel our lock;
 * on CANCELING, flush the covered page-cache region and shrink the stripe's
 * known-minimum-size (kms) under the namespace lock.
 * NOTE(review): case bodies and cleanup paths are partially elided here.
 */
489 static int ll_extent_lock_callback(struct ldlm_lock *lock,
490 struct ldlm_lock_desc *new, void *data,
493 struct lustre_handle lockh = { 0 };
/* Guard against obviously bogus (small-integer) opaque data pointers. */
497 if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
498 LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
503 case LDLM_CB_BLOCKING:
504 ldlm_lock2handle(lock, &lockh);
505 rc = ldlm_cli_cancel(&lockh);
507 CERROR("ldlm_cli_cancel failed: %d\n", rc);
509 case LDLM_CB_CANCELING: {
511 struct ll_inode_info *lli;
512 struct lov_stripe_md *lsm;
516 /* This lock wasn't granted, don't try to evict pages */
517 if (lock->l_req_mode != lock->l_granted_mode)
520 inode = ll_inode_from_lock(lock);
523 lli = ll_i2info(inode);
526 if (lli->lli_smd == NULL)
530 stripe = ll_lock_to_stripe_offset(inode, lock);
533 ll_pgcache_remove_extent(inode, lsm, lock, stripe);
535 /* grabbing the i_sem will wait for write() to complete. ns
536 * lock hold times should be very short as ast processing
537 * requires them and has a short timeout. so, i_sem before ns
/* Recompute and store the stripe's kms with the other locks' coverage. */
541 l_lock(&lock->l_resource->lr_namespace->ns_lock);
542 kms = ldlm_extent_shift_kms(lock,
543 lsm->lsm_oinfo[stripe].loi_kms);
545 if (lsm->lsm_oinfo[stripe].loi_kms != kms)
546 LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
547 lsm->lsm_oinfo[stripe].loi_kms, kms);
548 lsm->lsm_oinfo[stripe].loi_kms = kms;
549 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
551 //ll_try_done_writing(inode);
/*
 * Completion AST for client-side async extent enqueues.  When the lock is
 * granted (or glimpsed), merge the returned LVB size into the stripe's rss
 * and kms, wake waiters, and drop the enqueue reference.
 */
564 int ll_async_completion_ast(struct ldlm_lock *lock, int flags, void *data)
566 /* XXX ALLOCATE - 160 bytes */
567 struct inode *inode = ll_inode_from_lock(lock);
568 struct ll_inode_info *lli = ll_i2info(inode);
569 struct lustre_handle lockh = { 0 };
/* Blocked async locks are not expected yet — LBUG() makes the code below
 * this branch unreachable in practice. */
574 if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
575 LDLM_FL_BLOCK_CONV)) {
576 LBUG(); /* not expecting any blocked async locks yet */
577 LDLM_DEBUG(lock, "client-side async enqueue returned a blocked "
579 ldlm_lock_dump(D_OTHER, lock, 0);
580 ldlm_reprocess_all(lock->l_resource);
584 LDLM_DEBUG(lock, "client-side async enqueue: granted/glimpsed");
586 stripe = ll_lock_to_stripe_offset(inode, lock);
/* If the server returned an LVB, fold its size into rss/kms under the
 * namespace lock. */
590 if (lock->l_lvb_len) {
591 struct lov_stripe_md *lsm = lli->lli_smd;
593 lvb = lock->l_lvb_data;
594 lsm->lsm_oinfo[stripe].loi_rss = lvb->lvb_size;
596 l_lock(&lock->l_resource->lr_namespace->ns_lock);
598 kms = MAX(lsm->lsm_oinfo[stripe].loi_kms, lvb->lvb_size);
599 kms = ldlm_extent_shift_kms(NULL, kms);
600 if (lsm->lsm_oinfo[stripe].loi_kms != kms)
601 LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
602 lsm->lsm_oinfo[stripe].loi_kms, kms);
603 lsm->lsm_oinfo[stripe].loi_kms = kms;
605 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* Wake anyone waiting on this lock, then drop our PR reference. */
610 wake_up(&lock->l_waitq);
612 ldlm_lock2handle(lock, &lockh);
613 ldlm_lock_decref(&lockh, LCK_PR);
/*
 * Glimpse AST: answer a server-originated size query by packing this
 * client's known-minimum-size (kms) for the stripe into an LVB reply.
 * Races that yield -ELDLM_NO_LOCK_DATA are normal and replied to quietly.
 */
618 static int ll_glimpse_callback(struct ldlm_lock *lock, void *reqp)
620 struct ptlrpc_request *req = reqp;
621 struct inode *inode = ll_inode_from_lock(lock);
622 struct ll_inode_info *lli;
624 struct lov_stripe_md *lsm;
625 int rc, size = sizeof(*lvb), stripe;
629 GOTO(out, rc = -ELDLM_NO_LOCK_DATA);
630 lli = ll_i2info(inode);
632 GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
636 GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
638 /* First, find out which stripe index this lock corresponds to. */
639 stripe = ll_lock_to_stripe_offset(inode, lock);
641 GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
643 rc = lustre_pack_reply(req, 1, &size, NULL);
645 CERROR("lustre_pack_reply: %d\n", rc);
/* Report our kms for this stripe in the reply LVB. */
649 lvb = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*lvb));
650 lvb->lvb_size = lli->lli_smd->lsm_oinfo[stripe].loi_kms;
652 LDLM_DEBUG(lock, "i_size: %llu -> stripe number %u -> kms "LPU64,
653 inode->i_size, stripe, lvb->lvb_size);
659 /* These errors are normal races, so we don't want to fill the console
660 * with messages by calling ptlrpc_error() */
661 if (rc == -ELDLM_NO_LOCK_DATA)
662 lustre_pack_reply(req, 0, NULL, NULL);
668 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
669 __u64 lov_merge_blocks(struct lov_stripe_md *lsm);
670 __u64 lov_merge_mtime(struct lov_stripe_md *lsm, __u64 current_time);
672 /* NB: lov_merge_size will prefer locally cached writes if they extend the
673 * file (because it prefers KMS over RSS when larger) */
/* Enqueue a PR extent lock over the whole file with LDLM_FL_HAS_INTENT so
 * the OSTs glimpse-reply with their sizes; then merge size/blocks into the
 * inode and cancel the lock. */
674 int ll_glimpse_size(struct inode *inode)
676 struct ll_inode_info *lli = ll_i2info(inode);
677 struct ll_sb_info *sbi = ll_i2sbi(inode);
678 ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
679 struct lustre_handle lockh = { 0 };
680 int rc, flags = LDLM_FL_HAS_INTENT;
683 CDEBUG(D_DLMTRACE, "Glimpsing inode %lu\n", inode->i_ino);
685 rc = obd_enqueue(sbi->ll_lov_exp, lli->lli_smd, LDLM_EXTENT, &policy,
686 LCK_PR, &flags, ll_extent_lock_callback,
687 ldlm_completion_ast, ll_glimpse_callback, inode,
688 sizeof(struct ost_lvb), lustre_swab_ost_lvb, &lockh);
693 CERROR("obd_enqueue returned rc %d, returning -EIO\n", rc);
694 RETURN(rc > 0 ? -EIO : rc);
697 inode->i_size = lov_merge_size(lli->lli_smd, 0);
698 inode->i_blocks = lov_merge_blocks(lli->lli_smd);
699 //inode->i_mtime = lov_merge_mtime(lli->lli_smd, inode->i_mtime);
701 CDEBUG(D_DLMTRACE, "glimpse: size: "LPU64", blocks: "LPU64"\n",
702 (__u64)inode->i_size, (__u64)inode->i_blocks);
703 obd_cancel(sbi->ll_lov_exp, lli->lli_smd, LCK_PR, &lockh);
/* Record the elapsed time since @start into the given service-time stats,
 * serialized by the superblock's ll_lock spinlock. */
707 void ll_stime_record(struct ll_sb_info *sbi, struct timeval *start,
708 struct obd_service_time *stime)
711 do_gettimeofday(&stop);
713 spin_lock(&sbi->ll_lock);
714 lprocfs_stime_record(stime, &stop, start);
715 spin_unlock(&sbi->ll_lock);
/*
 * Acquire an extent lock on the file's objects.  No-op (lockless mode) when
 * the fd or superblock asks to skip locking.  Records enqueue latency, and
 * for full-file locks refreshes i_size from the merged kms.
 */
718 int ll_extent_lock(struct ll_file_data *fd, struct inode *inode,
719 struct lov_stripe_md *lsm, int mode,
720 ldlm_policy_data_t *policy, struct lustre_handle *lockh,
721 int ast_flags, struct obd_service_time *stime)
723 struct ll_sb_info *sbi = ll_i2sbi(inode);
724 struct timeval start;
728 LASSERT(lockh->cookie == 0);
730 /* XXX phil: can we do this? won't it screw the file size up? */
731 if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
732 (sbi->ll_flags & LL_SBI_NOLCK))
735 CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
736 inode->i_ino, policy->l_extent.start, policy->l_extent.end);
738 do_gettimeofday(&start);
739 rc = obd_enqueue(sbi->ll_lov_exp, lsm, LDLM_EXTENT, policy, mode,
740 &ast_flags, ll_extent_lock_callback,
741 ldlm_completion_ast, ll_glimpse_callback, inode,
742 sizeof(struct ost_lvb), lustre_swab_ost_lvb, lockh);
746 ll_stime_record(sbi, &start, stime);
748 if (policy->l_extent.start == 0 &&
749 policy->l_extent.end == OBD_OBJECT_EOF) {
750 /* vmtruncate()->ll_truncate() first sets the i_size and then
751 * the kms under both a DLM lock and the i_sem. If we don't
752 * get the i_sem here we can match the DLM lock and reset
753 * i_size from the kms before the truncating path has updated
754 * the kms. generic_file_write can then trust the stale i_size
755 * when doing appending writes and effectively cancel the
756 * result of the truncate. Getting the i_sem after the enqueue
757 * maintains the DLM -> i_sem acquiry order. */
759 inode->i_size = lov_merge_size(lsm, 1);
762 //inode->i_mtime = lov_merge_mtime(lsm, inode->i_mtime);
/* Release an extent lock taken by ll_extent_lock(); mirrors its lockless
 * short-circuit for LL_FILE_IGNORE_LOCK / LL_SBI_NOLCK. */
767 int ll_extent_unlock(struct ll_file_data *fd, struct inode *inode,
768 struct lov_stripe_md *lsm, int mode,
769 struct lustre_handle *lockh)
771 struct ll_sb_info *sbi = ll_i2sbi(inode);
775 /* XXX phil: can we do this? won't it screw the file size up? */
776 if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
777 (sbi->ll_flags & LL_SBI_NOLCK))
780 rc = obd_cancel(sbi->ll_lov_exp, lsm, mode, lockh);
/*
 * VFS read path: take a tree of extent locks covering [*ppos, *ppos+count),
 * glimpse the size if the read may extend past our kms, then fall through
 * to generic_file_read() with kernel readahead disabled.
 */
785 static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
788 struct inode *inode = file->f_dentry->d_inode;
789 struct ll_inode_info *lli = ll_i2info(inode);
790 struct lov_stripe_md *lsm = lli->lli_smd;
791 struct ll_lock_tree tree;
792 struct ll_lock_tree_node *node;
797 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
798 inode->i_ino, inode->i_generation, inode, count, *ppos);
800 /* "If nbyte is 0, read() will return 0 and have no other results."
801 * -- Single Unix Spec */
805 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_READ_BYTES,
811 node = ll_node_from_inode(inode, *ppos, *ppos + count - 1,
814 tree.lt_fd = file->private_data;
/* O_NONBLOCK maps to a non-blocking DLM enqueue. */
816 rc = ll_tree_lock(&tree, node, inode, buf, count,
817 file->f_flags & O_NONBLOCK ? LDLM_FL_BLOCK_NOWAIT :0);
821 kms = lov_merge_size(lsm, 1);
822 if (*ppos + count - 1 > kms) {
823 /* A glimpse is necessary to determine whether we return a short
824 * read or some zeroes at the end of the buffer */
825 retval = ll_glimpse_size(inode);
832 CDEBUG(D_INFO, "Read ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
833 inode->i_ino, count, *ppos, inode->i_size);
835 /* turn off the kernel's read-ahead */
836 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
839 file->f_ra.ra_pages = 0;
841 retval = generic_file_read(file, buf, count, ppos);
844 ll_tree_unlock(&tree, inode);
849 * Write to a file (through the page cache).
/* VFS write path: lock [*ppos, *ppos+count) (or the whole file for
 * O_APPEND) as PW, enforce the per-file maxbytes limit with SIGXFSZ, then
 * delegate to generic_file_write(). */
851 static ssize_t ll_file_write(struct file *file, const char *buf,
852 size_t count, loff_t *ppos)
854 struct inode *inode = file->f_dentry->d_inode;
855 loff_t maxbytes = ll_file_maxbytes(inode);
856 struct ll_lock_tree tree;
857 struct ll_lock_tree_node *node;
862 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
863 inode->i_ino, inode->i_generation, inode, count, *ppos);
865 SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
867 /* POSIX, but surprised the VFS doesn't check this already */
871 /* If file was opened for LL_IOC_LOV_SETSTRIPE but the ioctl wasn't
872 * called on the file, don't fail the below assertion (bug 2388). */
873 if (file->f_flags & O_LOV_DELAY_CREATE &&
874 ll_i2info(inode)->lli_smd == NULL)
877 LASSERT(ll_i2info(inode)->lli_smd != NULL);
/* Appending writes must lock to EOF since the start offset is unknown
 * until i_sem is held. */
879 if (file->f_flags & O_APPEND)
880 node = ll_node_from_inode(inode, 0, OBD_OBJECT_EOF, LCK_PW);
882 node = ll_node_from_inode(inode, *ppos, *ppos + count - 1,
886 RETURN(PTR_ERR(node));
888 tree.lt_fd = file->private_data;
890 rc = ll_tree_lock(&tree, node, inode, buf, count,
891 file->f_flags & O_NONBLOCK ? LDLM_FL_BLOCK_NOWAIT :0);
895 /* this is ok, g_f_w will overwrite this under i_sem if it races
896 * with a local truncate, it just makes our maxbyte checking easier */
897 if (file->f_flags & O_APPEND)
898 *ppos = inode->i_size;
900 if (*ppos >= maxbytes) {
901 if (count || *ppos > maxbytes) {
902 send_sig(SIGXFSZ, current, 0);
903 GOTO(out, retval = -EFBIG);
/* Clamp the write so it never crosses maxbytes. */
906 if (*ppos + count > maxbytes)
907 count = maxbytes - *ppos;
909 CDEBUG(D_INFO, "Writing inode %lu, "LPSZ" bytes, offset %Lu\n",
910 inode->i_ino, count, *ppos);
912 /* generic_file_write handles O_APPEND after getting i_sem */
913 retval = generic_file_write(file, buf, count, ppos);
916 ll_tree_unlock(&tree, inode);
917 /* serialize with mmap/munmap/mremap */
918 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_WRITE_BYTES,
919 retval > 0 ? retval : 0);
/*
 * LL_IOC_RECREATE_OBJ handler: re-create a lost OST object for this file
 * (admin-only).  Builds a copy of the stripe MD and calls obd_create with
 * OBD_FL_RECREATE_OBJS, all under lli_open_sem.
 * NOTE(review): allocation checks and some cleanup lines are elided here.
 */
923 static int ll_lov_recreate_obj(struct inode *inode, struct file *file,
926 struct ll_inode_info *lli = ll_i2info(inode);
927 struct obd_export *exp = ll_i2obdexp(inode);
928 struct ll_recreate_obj ucreatp;
929 struct obd_trans_info oti = { 0 };
930 struct obdo *oa = NULL;
933 struct lov_stripe_md *lsm, *lsm2;
936 if (!capable (CAP_SYS_ADMIN))
939 rc = copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
940 sizeof(struct ll_recreate_obj));
948 down(&lli->lli_open_sem);
951 GOTO(out, rc = -ENOENT);
952 lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
953 (lsm->lsm_stripe_count));
955 OBD_ALLOC(lsm2, lsm_size);
957 GOTO(out, rc = -ENOMEM);
/* Describe the object to recreate: id, OST index (via o_nlink), group. */
959 oa->o_id = ucreatp.lrc_id;
960 oa->o_nlink = ucreatp.lrc_ost_idx;
961 oa->o_gr = ucreatp.lrc_group;
962 oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
963 oa->o_flags |= OBD_FL_RECREATE_OBJS;
964 obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
965 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
967 oti.oti_objid = NULL;
968 memcpy(lsm2, lsm, lsm_size);
969 rc = obd_create(exp, oa, &lsm2, &oti);
971 OBD_FREE(lsm2, lsm_size);
974 up(&lli->lli_open_sem);
/*
 * Create the file's OST objects with an explicit striping EA (used by the
 * SETSTRIPE/SETEA ioctls).  Does a fresh intent open on a temporary file
 * struct so the MDS records the layout, then releases it, all under
 * lli_open_sem.  Fails if a stripe MD already exists.
 */
979 static int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
980 int flags, struct lov_user_md *lum,
983 struct ll_inode_info *lli = ll_i2info(inode);
985 struct obd_export *exp = ll_i2obdexp(inode);
986 struct lov_stripe_md *lsm;
987 struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
988 struct ptlrpc_request *req = NULL;
993 down(&lli->lli_open_sem);
996 up(&lli->lli_open_sem);
997 CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
/* Borrow the caller's dentry/vfsmnt on a scratch file struct for the
 * intent open. */
1002 f = get_empty_filp();
1006 f->f_dentry = file->f_dentry;
1007 f->f_vfsmnt = file->f_vfsmnt;
1009 rc = ll_intent_file_open(f, lum, lum_size, &oit);
1012 if (it_disposition(&oit, DISP_LOOKUP_NEG))
1014 req = oit.d.lustre.it_data;
1015 rc = oit.d.lustre.it_status;
/* Unpack the returned MD and refresh the inode before the local open. */
1020 rc = mdc_req2lustre_md(ll_i2lmvexp(inode), req, 1, exp, &md);
1023 ll_update_inode(f->f_dentry->d_inode, &md);
1025 rc = ll_local_open(f, &oit);
1028 ll_intent_release(&oit);
1030 rc = ll_file_release(f->f_dentry->d_inode, f);
1035 up(&lli->lli_open_sem);
1037 ptlrpc_req_finished(req);
/* LL_IOC_LOV_SETEA handler (admin-only): copy a lov_user_md with one OST
 * entry from userspace and create the objects via
 * ll_lov_setstripe_ea_info(). */
1041 static int ll_lov_setea(struct inode *inode, struct file *file,
1044 int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
1045 struct lov_user_md *lump;
1046 int lum_size = sizeof(struct lov_user_md) +
1047 sizeof(struct lov_user_ost_data);
1051 if (!capable (CAP_SYS_ADMIN))
1054 OBD_ALLOC(lump, lum_size);
1058 rc = copy_from_user(lump, (struct lov_user_md *)arg, lum_size);
1060 OBD_FREE(lump, lum_size);
1064 rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
1066 OBD_FREE(lump, lum_size);
/* LL_IOC_LOV_SETSTRIPE handler: copy the user's striping request, create
 * the objects, then write the resulting layout back to the user buffer. */
1070 static int ll_lov_setstripe(struct inode *inode, struct file *file,
1073 struct lov_user_md lum, *lump = (struct lov_user_md *)arg;
1075 int flags = FMODE_WRITE;
1078 /* Bug 1152: copy properly when this is no longer true */
1079 LASSERT(sizeof(lum) == sizeof(*lump));
1080 LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
1081 rc = copy_from_user(&lum, lump, sizeof(lum));
1085 rc = ll_lov_setstripe_ea_info(inode, file, flags, &lum, sizeof(lum));
/* Echo the actual layout back so the caller sees what was created. */
1087 put_user(0, &lump->lmm_stripe_count);
1088 rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2obdexp(inode),
1089 0, ll_i2info(inode)->lli_smd, lump);
/* LL_IOC_LOV_GETSTRIPE handler: hand the stripe query down to the LOV via
 * obd_iocontrol (which copies the layout to the user buffer in @arg). */
1094 static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
1096 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1101 return obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2obdexp(inode), 0, lsm,
/*
 * LL_IOC_GROUP_LOCK handler: take a whole-file LCK_GROUP extent lock with
 * group id @arg and mark the fd as group-locked (which also bypasses the
 * usual per-I/O locking via LL_FILE_IGNORE_LOCK).
 */
1105 static int ll_get_grouplock(struct inode *inode, struct file *file,
1108 struct ll_file_data *fd = file->private_data;
1109 ldlm_policy_data_t policy = { .l_extent = { .start = 0,
1110 .end = OBD_OBJECT_EOF}};
1111 struct lustre_handle lockh = { 0 };
1112 struct ll_inode_info *lli = ll_i2info(inode);
1113 struct lov_stripe_md *lsm = lli->lli_smd;
/* Only one group lock per open file descriptor. */
1117 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
1121 policy.l_extent.gid = arg;
1122 if (file->f_flags & O_NONBLOCK)
1123 flags = LDLM_FL_BLOCK_NOWAIT;
1125 rc = ll_extent_lock(fd, inode, lsm, LCK_GROUP, &policy, &lockh, flags,
1126 &ll_i2sbi(inode)->ll_grouplock_stime);
1130 fd->fd_flags |= LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK;
1132 memcpy(&fd->fd_cwlockh, &lockh, sizeof(lockh));
/* LL_IOC_GROUP_UNLOCK handler: drop the fd's group lock, validating that a
 * group lock is held and that the gid matches the one it was taken with. */
1137 static int ll_put_grouplock(struct inode *inode, struct file *file,
1140 struct ll_file_data *fd = file->private_data;
1141 struct ll_inode_info *lli = ll_i2info(inode);
1142 struct lov_stripe_md *lsm = lli->lli_smd;
1146 if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
1147 /* Ugh, it's already unlocked. */
1151 if (fd->fd_gid != arg) /* Ugh? Unlocking with different gid? */
1154 fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
1156 rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP, &fd->fd_cwlockh);
1161 memset(&fd->fd_cwlockh, 0, sizeof(fd->fd_cwlockh));
/*
 * VFS ioctl dispatcher for regular files: handles the LL_IOC_* family,
 * a few EXT3 compatibility ioctls, and falls through to obd_iocontrol for
 * anything else.  NOTE(review): some case bodies and the switch header are
 * elided in this listing.
 */
1166 int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
1169 struct ll_file_data *fd = file->private_data;
1170 struct ll_sb_info *sbi = ll_i2sbi(inode);
1174 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
1175 inode->i_generation, inode, cmd);
1177 if (_IOC_TYPE(cmd) == 'T') /* tty ioctls */
1180 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_IOCTL);
1182 case LL_IOC_GETFLAGS:
1183 /* Get the current value of the file flags */
1184 return put_user(fd->fd_flags, (int *)arg);
1185 case LL_IOC_SETFLAGS:
1186 case LL_IOC_CLRFLAGS:
1187 /* Set or clear specific file flags */
1188 /* XXX This probably needs checks to ensure the flags are
1189 * not abused, and to handle any flag side effects.
1191 if (get_user(flags, (int *) arg))
1194 if (cmd == LL_IOC_SETFLAGS)
1195 fd->fd_flags |= flags;
1197 fd->fd_flags &= ~flags;
1199 case LL_IOC_LOV_SETSTRIPE:
1200 RETURN(ll_lov_setstripe(inode, file, arg));
1201 case LL_IOC_LOV_SETEA:
1202 RETURN(ll_lov_setea(inode, file, arg));
1203 case IOC_MDC_SHOWFID: {
1204 struct lustre_id *idp = (struct lustre_id *)arg;
1205 struct lustre_id id;
/* Resolve a filename (also at @arg) to its fid via the MDS and copy the
 * result back to userspace. */
1209 filename = getname((const char *)arg);
1210 if (IS_ERR(filename))
1211 RETURN(PTR_ERR(filename));
1213 ll_inode2id(&id, inode);
1215 rc = ll_get_fid(sbi->ll_lmv_exp, &id, filename, &id);
1217 GOTO(out_filename, rc);
1219 rc = copy_to_user(idp, &id, sizeof(*idp));
1221 GOTO(out_filename, rc = -EFAULT);
1228 case LL_IOC_LOV_GETSTRIPE:
1229 RETURN(ll_lov_getstripe(inode, arg));
1230 case LL_IOC_RECREATE_OBJ:
1231 RETURN(ll_lov_recreate_obj(inode, file, arg));
1232 case EXT3_IOC_GETFLAGS:
1233 case EXT3_IOC_SETFLAGS:
1234 RETURN( ll_iocontrol(inode, file, cmd, arg) );
1235 case LL_IOC_GROUP_LOCK:
1236 RETURN(ll_get_grouplock(inode, file, arg));
1237 case LL_IOC_GROUP_UNLOCK:
1238 RETURN(ll_put_grouplock(inode, file, arg));
1239 case EXT3_IOC_GETVERSION_OLD:
1240 case EXT3_IOC_GETVERSION:
1241 return put_user(inode->i_generation, (int *) arg);
1242 /* We need to special case any other ioctls we want to handle,
1243 * to send them to the MDS/OST as appropriate and to properly
1244 * network encode the arg field.
1245 case EXT2_IOC_GETVERSION_OLD:
1246 case EXT2_IOC_GETVERSION_NEW:
1247 case EXT2_IOC_SETVERSION_OLD:
1248 case EXT2_IOC_SETVERSION_NEW:
1249 case EXT3_IOC_SETVERSION_OLD:
1250 case EXT3_IOC_SETVERSION:
/* Unknown commands are forwarded to the OBD layer. */
1253 RETURN( obd_iocontrol(cmd, ll_i2obdexp(inode), 0, NULL,
/*
 * ll_file_seek() - llseek method for Lustre files.
 *
 * For SEEK_END we must know the authoritative file size, so a PR
 * extent lock covering [0, EOF] is taken (non-blocking if the file was
 * opened O_NONBLOCK) before reading inode->i_size, and dropped again
 * afterwards.  SEEK_CUR simply offsets from f_pos.  The new offset is
 * validated against ll_file_maxbytes() before being stored.
 *
 * NOTE(review): the error-return paths and the final RETURN are elided
 * from this excerpt (line numbering jumps).
 */
1258 loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
1260 struct inode *inode = file->f_dentry->d_inode;
1261 struct ll_file_data *fd = file->private_data;
1262 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1263 struct lustre_handle lockh = {0};
1266 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),to=%llu\n", inode->i_ino,
1267 inode->i_generation, inode,
1268 offset + ((origin==2) ? inode->i_size : file->f_pos));
1270 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_LLSEEK);
1271 if (origin == 2) { /* SEEK_END */
/* Lock the whole object so i_size is current before we use it. */
1272 ldlm_policy_data_t policy = { .l_extent = {0, OBD_OBJECT_EOF }};
1273 int nonblock = 0, rc;
1275 if (file->f_flags & O_NONBLOCK)
1276 nonblock = LDLM_FL_BLOCK_NOWAIT;
1278 rc = ll_extent_lock(fd, inode, lsm, LCK_PR, &policy, &lockh,
1279 nonblock, &ll_i2sbi(inode)->ll_seek_stime);
1283 offset += inode->i_size;
1284 } else if (origin == 1) { /* SEEK_CUR */
1285 offset += file->f_pos;
/* Reject offsets outside [0, maxbytes]; update f_pos only on change. */
1289 if (offset >= 0 && offset <= ll_file_maxbytes(inode)) {
1290 if (offset != file->f_pos) {
1291 file->f_pos = offset;
1292 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* 2.4 kernels track seeks via the global 'event' counter. */
1294 file->f_version = ++event;
/* Drop the PR lock taken above for the SEEK_END case. */
1301 ll_extent_unlock(fd, inode, lsm, LCK_PR, &lockh);
/*
 * ll_fsync() - fsync method: flush metadata to the MDS and data to the
 * OSTs.
 *
 * Waits for already-submitted page I/O, then issues md_sync() for the
 * metadata and, when the file has allocated objects (lsm != NULL),
 * obd_sync() for the data.  @data distinguishes fdatasync from fsync
 * at the VFS level.
 *
 * NOTE(review): the declarations of 'rc'/'err', several error checks,
 * and the tail of the obd_sync() call are elided from this excerpt.
 */
1305 int ll_fsync(struct file *file, struct dentry *dentry, int data)
1307 struct inode *inode = dentry->d_inode;
1308 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1309 struct lustre_id id;
1310 struct ptlrpc_request *req;
1313 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1314 inode->i_generation, inode);
1316 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_FSYNC);
1318 /* fsync's caller has already called _fdata{sync,write}, we want
1319 * that IO to finish before calling the osc and mdc sync methods */
1320 rc = filemap_fdatawait(inode->i_mapping);
/* Sync the metadata via the LMV/MDC export. */
1322 ll_inode2id(&id, inode);
1323 err = md_sync(ll_i2sbi(inode)->ll_lmv_exp, &id, &req);
1327 ptlrpc_req_finished(req);
/* Data sync path: build an obdo describing this inode and push it to
 * the OSTs through the LOV export.  obdo_alloc() failure is folded
 * into the return below. */
1330 struct obdo *oa = obdo_alloc();
1333 RETURN(rc ? rc : -ENOMEM);
1335 oa->o_id = lsm->lsm_object_id;
1336 oa->o_gr = lsm->lsm_object_gr;
1337 oa->o_valid = OBD_MD_FLID;
1338 obdo_from_inode(oa, inode, (OBD_MD_FLTYPE | OBD_MD_FLATIME |
1339 OBD_MD_FLMTIME | OBD_MD_FLCTIME |
1342 err = obd_sync(ll_i2sbi(inode)->ll_lov_exp, oa, lsm,
/*
 * ll_file_flock() - fcntl()/flock() record-locking method.
 *
 * Translates a VFS struct file_lock into an LDLM_FLOCK enqueue against
 * the MDS namespace: fl_type maps to an ldlm mode, cmd maps to enqueue
 * flags (F_GETLK → LDLM_FL_TEST_LOCK, non-blocking variants →
 * LDLM_FL_BLOCK_NOWAIT), and ldlm_flock_completion_ast handles
 * blocking/granting.
 *
 * NOTE(review): both switch headers, most case labels, and the
 * mode/flags assignments are elided from this excerpt; only the
 * default/error arms and surrounding comments are visible.
 */
1352 int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
1354 struct inode *inode = file->f_dentry->d_inode;
1355 struct ll_inode_info *li = ll_i2info(inode);
1356 struct ll_sb_info *sbi = ll_i2sbi(inode);
1357 struct obd_device *obddev;
/* Flock resources are named by (fid, group, LDLM_FLOCK) so all locks
 * on one file land on the same resource. */
1358 struct ldlm_res_id res_id =
1359 { .name = {id_fid(&li->lli_id), id_group(&li->lli_id), LDLM_FLOCK} };
1360 struct lustre_handle lockh = {0};
1361 ldlm_policy_data_t flock;
1362 ldlm_mode_t mode = 0;
1367 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
1368 inode->i_ino, file_lock);
/* Copy the byte range and owner pid into the ldlm flock policy. */
1370 flock.l_flock.pid = file_lock->fl_pid;
1371 flock.l_flock.start = file_lock->fl_start;
1372 flock.l_flock.end = file_lock->fl_end;
1374 switch (file_lock->fl_type) {
1379 /* An unlock request may or may not have any relation to
1380 * existing locks so we may not be able to pass a lock handle
1381 * via a normal ldlm_lock_cancel() request. The request may even
1382 * unlock a byte range in the middle of an existing lock. In
1383 * order to process an unlock request we need all of the same
1384 * information that is given with a normal read or write record
1385 * lock request. To avoid creating another ldlm unlock (cancel)
1386 * message we'll treat a LCK_NL flock request as an unlock. */
1393 CERROR("unknown fcntl lock type: %d\n", file_lock->fl_type);
/* Non-blocking request: fail rather than wait on conflicts. */
1408 flags = LDLM_FL_BLOCK_NOWAIT;
/* F_GETLK: test only, don't actually take the lock. */
1414 flags = LDLM_FL_TEST_LOCK;
1415 /* Save the old mode so that if the mode in the lock changes we
1416 * can decrement the appropriate reader or writer refcount. */
1417 file_lock->fl_type = mode;
1420 CERROR("unknown fcntl lock command: %d\n", cmd);
1424 CDEBUG(D_DLMTRACE, "inode=%lu, pid="LPU64", flags=%#x, mode=%u, "
1425 "start="LPU64", end="LPU64"\n", inode->i_ino, flock.l_flock.pid,
1426 flags, mode, flock.l_flock.start, flock.l_flock.end);
/* Enqueue directly against the MDS-side namespace; file_lock is
 * passed through to the flock completion AST as its cookie. */
1428 obddev = md_get_real_obd(sbi->ll_lmv_exp, NULL, 0);
1429 rc = ldlm_cli_enqueue(obddev->obd_self_export, NULL,
1430 obddev->obd_namespace,
1431 res_id, LDLM_FLOCK, &flock, mode, &flags,
1432 NULL, ldlm_flock_completion_ast, NULL, file_lock,
1433 NULL, 0, NULL, &lockh);
/*
 * ll_inode_revalidate_it() - revalidate a dentry/inode with the MDS.
 *
 * Issues an IT_GETATTR intent lock to refresh the inode's metadata,
 * re-hashes the dentry (except the root), clears the Lustre "invalid"
 * flag, and finally glimpses the file size from the OSTs when the
 * object is allocated (lsm != NULL).
 *
 * NOTE(review): several guard branches (NULL-inode check feeding the
 * CERROR below, the md_intent_lock error path, and the GOTO target
 * labels) are elided from this excerpt.
 */
1437 int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it)
1439 struct lookup_intent oit = { .it_op = IT_GETATTR };
1440 struct inode *inode = dentry->d_inode;
1441 struct ptlrpc_request *req = NULL;
1442 struct ll_inode_info *lli;
1443 struct lov_stripe_md *lsm;
1444 struct ll_sb_info *sbi;
1445 struct lustre_id id;
/* Presumably reached only when inode is NULL — debugging aid. */
1451 CERROR("REPORT THIS LINE TO PETER\n");
1455 sbi = ll_i2sbi(inode);
1457 ll_inode2id(&id, inode);
1458 lli = ll_i2info(inode);
1459 LASSERT(id_fid(&id) != 0);
1461 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), name=%s, intent=%s\n",
1462 inode->i_ino, inode->i_generation, inode, dentry->d_name.name,
1465 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0))
1466 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_REVALIDATE);
/* Ask the MDS for fresh attributes under an intent lock. */
1469 rc = md_intent_lock(sbi->ll_lmv_exp, &id, NULL, 0, NULL, 0, &id,
1470 &oit, 0, &req, ll_mdc_blocking_ast);
1474 rc = revalidate_it_finish(req, 1, &oit, dentry);
1476 ll_intent_release(&oit);
/* Unhash and re-hash so a previously invalidated dentry becomes
 * visible again; the tree root is never re-hashed. */
1480 if (dentry->d_iname[0] != '/') { /* Do not rehash root of the tree
1482 spin_lock(&dcache_lock);
1483 hlist_del_init(&dentry->d_hash);
1484 __d_rehash(dentry, 0);
1485 spin_unlock(&dcache_lock);
1487 ll_lookup_finish_locks(&oit, dentry);
1488 dentry->d_flags &= ~DCACHE_LUSTRE_INVALID;
1492 if (lsm == NULL) /* object not yet allocated, don't validate size */
1496 * ll_glimpse_size() will prefer locally cached writes if they extend
1499 rc = ll_glimpse_size(inode);
1503 ptlrpc_req_finished(req);
1507 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
/*
 * ll_getattr() - 2.6-kernel getattr_it method.
 *
 * Revalidates the inode with the MDS via ll_inode_revalidate_it(),
 * then fills *stat from the (now fresh) in-core inode fields.
 * stat->dev is set to the object group of the Lustre id rather than
 * the local device number.
 *
 * NOTE(review): the error check on 'res' and the final return are
 * elided from this excerpt.
 */
1508 int ll_getattr(struct vfsmount *mnt, struct dentry *de,
1509 struct lookup_intent *it, struct kstat *stat)
1512 struct inode *inode = de->d_inode;
1514 res = ll_inode_revalidate_it(de, it);
1515 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_GETATTR);
/* Copy attributes out of the revalidated inode. */
1520 stat->ino = inode->i_ino;
1521 stat->mode = inode->i_mode;
1522 stat->nlink = inode->i_nlink;
1523 stat->uid = inode->i_uid;
1524 stat->gid = inode->i_gid;
1525 stat->atime = inode->i_atime;
1526 stat->mtime = inode->i_mtime;
1527 stat->ctime = inode->i_ctime;
1528 stat->size = inode->i_size;
1529 stat->blksize = inode->i_blksize;
1530 stat->blocks = inode->i_blocks;
1531 stat->rdev = kdev_t_to_nr(inode->i_rdev);
1532 stat->dev = id_group(&ll_i2info(inode)->lli_id);
/*
 * VFS file operations for regular Lustre files.  sendfile is only
 * wired up on 2.6+ kernels; .lock provides fcntl record locking via
 * ll_file_flock().
 *
 * NOTE(review): the closing '};' and any #else branch of the version
 * conditional are elided from this excerpt.
 */
1537 struct file_operations ll_file_operations = {
1538 .read = ll_file_read,
1539 .write = ll_file_write,
1540 .ioctl = ll_file_ioctl,
1541 .open = ll_file_open,
1542 .release = ll_file_release,
1543 .mmap = ll_file_mmap,
1544 .llseek = ll_file_seek,
1545 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
1546 .sendfile = generic_file_sendfile,
1549 .lock = ll_file_flock
1552 struct inode_operations ll_file_inode_operations = {
1553 .setattr_raw = ll_setattr_raw,
1554 .setattr = ll_setattr,
1555 .truncate = ll_truncate,
1556 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1557 .getattr_it = ll_getattr,
1559 .revalidate_it = ll_inode_revalidate_it,