1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
7 * Author: Andreas Dilger <adilger@clusterfs.com>
9 * This file is part of Lustre, http://www.lustre.org.
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #define DEBUG_SUBSYSTEM S_LLITE
26 #include <linux/lustre_dlm.h>
27 #include <linux/lustre_lite.h>
28 #include <linux/pagemap.h>
29 #include <linux/file.h>
30 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
31 #include <linux/lustre_compat25.h>
34 #include "llite_internal.h"
/* Close the MDS open handle for @inode held in @file's private data.
 * NOTE(review): this excerpt has elided lines (declarations of rc/valid/
 * the obdo, the closing braces, the RETURN) — do not assume the gaps. */
36 static int ll_mdc_close(struct obd_export *mdc_exp, struct inode *inode,
39 struct ll_file_data *fd = file->private_data;
40 struct ptlrpc_request *req = NULL;
41 struct obd_client_handle *och = &fd->fd_mds_och;
46 /* clear group lock, if present */
47 if (fd->fd_flags & LL_FILE_CW_LOCKED) {
48 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
49 fd->fd_flags &= ~(LL_FILE_CW_LOCKED|LL_FILE_IGNORE_LOCK);
50 rc = ll_extent_unlock(fd, inode, lsm, LCK_CW, &fd->fd_cwlockh);
/* Pack the inode's current attributes into the obdo sent with the close,
 * so the MDS sees the client's view of size/blocks at close time. */
55 memset(&obdo, 0, sizeof(obdo));
56 obdo.o_id = inode->i_ino;
57 obdo.o_mode = inode->i_mode;
58 obdo.o_size = inode->i_size;
59 obdo.o_blocks = inode->i_blocks;
/* Dirty-inode reporting is compiled out (condition is literal 0);
 * presumably awaiting ll_is_inode_dirty() — left disabled upstream. */
60 if (0 /* ll_is_inode_dirty(inode) */) {
61 obdo.o_flags = MDS_BFLAG_UNCOMMITTED_WRITES;
62 valid |= OBD_MD_FLFLAGS;
65 obdo.o_mds = ll_i2info(inode)->lli_mds;
66 rc = md_close(mdc_exp, &obdo, och, &req);
68 /* We are the last writer, so the MDS has instructed us to get
69 * the file size and any write cookies, then close again. */
70 //ll_queue_done_writing(inode);
73 CERROR("inode %lu mdc close failed: rc = %d\n",
/* Destroy OST objects if the close reply carried unlink cookies. */
77 rc = ll_objects_destroy(req, file->f_dentry->d_inode);
79 CERROR("inode %lu ll_objects destroy: rc = %d\n",
/* Tear down replay state and poison the handle so a stale use of this
 * fd is detectable; the fd itself is freed back to its slab. */
83 mdc_clear_open_replay_data(och);
84 ptlrpc_req_finished(req);
85 och->och_fh.cookie = DEAD_HANDLE_MAGIC;
86 file->private_data = NULL;
87 OBD_SLAB_FREE(fd, ll_file_data_slab, sizeof *fd);
92 /* While this returns an error code, fput() the caller does not, so we need
93 * to make every effort to clean up all of our state here. Also, applications
94 * rarely check close errors and even if an error is returned they will not
95 * re-try the close call.
/* VFS ->release() for Lustre files: count the op and close the MDS open
 * handle via ll_mdc_close().  NOTE(review): excerpt elides some lines
 * (braces, rc declaration, RETURN paths). */
97 int ll_file_release(struct inode *inode, struct file *file)
99 struct ll_file_data *fd;
100 struct ll_sb_info *sbi = ll_i2sbi(inode);
104 CDEBUG(D_VFSTRACE, "VFS Op:inode=%u/%lu/%u(%p)\n",
105 ll_i2info(inode)->lli_mds, inode->i_ino,
106 inode->i_generation, inode);
108 /* don't do anything for / */
109 if (inode->i_sb->s_root == file->f_dentry)
112 lprocfs_counter_incr(sbi->ll_stats, LPROC_LL_RELEASE);
113 fd = (struct ll_file_data *)file->private_data;
116 rc = ll_mdc_close(sbi->ll_mdc_exp, inode, file);
/* Enqueue an IT_OPEN intent lock on the parent/name of @file, passing
 * the (optional) striping EA @lmm/@lmmsize to the MDS.  On success the
 * granted lock handle is saved into the intent for later release. */
120 static int ll_intent_file_open(struct file *file, void *lmm,
121 int lmmsize, struct lookup_intent *itp)
123 struct ll_sb_info *sbi = ll_i2sbi(file->f_dentry->d_inode);
124 struct lustre_handle lockh;
125 struct mdc_op_data data;
126 struct dentry *parent = file->f_dentry->d_parent;
127 const char *name = file->f_dentry->d_name.name;
128 const int len = file->f_dentry->d_name.len;
/* NOTE(review): O_RDWR is passed as the mode argument here regardless of
 * the caller's open flags — confirm this is intentional against callers. */
134 ll_prepare_mdc_op_data(&data, parent->d_inode, NULL, name, len, O_RDWR);
136 rc = md_enqueue(sbi->ll_mdc_exp, LDLM_IBITS, itp, LCK_PR, &data,
137 &lockh, lmm, lmmsize, ldlm_completion_ast,
138 ll_mdc_blocking_ast, NULL);
140 if (itp->d.lustre.it_lock_mode)
141 memcpy(&itp->d.lustre.it_lock_handle,
142 &lockh, sizeof(lockh));
144 CERROR("lock enqueue: err: %d\n", rc);
/* Finish an open locally: allocate the per-open ll_file_data, copy the
 * MDS open handle out of the intent's reply, and register the open for
 * replay.  NOTE(review): allocation-failure handling lines are elided
 * in this excerpt. */
149 static int ll_local_open(struct file *file, struct lookup_intent *it)
151 struct ptlrpc_request *req = it->d.lustre.it_data;
152 struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
153 struct ll_file_data *fd;
154 struct mds_body *body;
/* Reply buffer 1 holds the mds_body; it was validated and byte-swapped
 * when the reply was unpacked, hence the LASSERTs rather than checks. */
157 body = lustre_msg_buf (req->rq_repmsg, 1, sizeof (*body));
158 LASSERT (body != NULL); /* reply already checked out */
159 LASSERT_REPSWABBED (req, 1); /* and swabbed down */
161 LASSERT(!file->private_data);
163 OBD_SLAB_ALLOC(fd, ll_file_data_slab, SLAB_KERNEL, sizeof *fd);
164 /* We can't handle this well without reorganizing ll_file_open and
165 * ll_mdc_close, so don't even try right now. */
168 memcpy(&fd->fd_mds_och.och_fh, &body->handle, sizeof(body->handle));
169 fd->fd_mds_och.och_magic = OBD_CLIENT_HANDLE_MAGIC;
170 file->private_data = fd;
171 ll_readahead_init(file->f_dentry->d_inode, &fd->fd_ras);
173 lli->lli_io_epoch = body->io_epoch;
/* Keep enough state to re-send the open if the MDS fails over. */
175 mdc_set_open_replay_data(&fd->fd_mds_och, it->d.lustre.it_data);
180 /* Open a file, and (for the very first open) create objects on the OSTs at
181 * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
182 * creation or open until ll_lov_setstripe() ioctl is called. We grab
183 * lli_open_sem to ensure no other process will create objects, send the
184 * stripe MD to the MDS, or try to destroy the objects if that fails.
186 * If we already have the stripe MD locally then we don't request it in
187 * mdc_open(), by passing a lmm_size = 0.
189 * It is up to the application to ensure no other processes open this file
190 * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
191 * used. We might be able to avoid races of that sort by getting lli_open_sem
192 * before returning in the O_LOV_DELAY_CREATE case and dropping it here
193 * or in ll_file_release(), but I'm not sure that is desirable/necessary.
/* VFS ->open() for Lustre files; see the block comment above for the
 * O_LOV_DELAY_CREATE / lli_open_sem protocol.  NOTE(review): excerpt
 * elides lines (it initialization, error gotos, RETURN paths). */
195 int ll_file_open(struct inode *inode, struct file *file)
197 struct ll_inode_info *lli = ll_i2info(inode);
198 struct lookup_intent *it;
199 struct lov_stripe_md *lsm;
200 struct ptlrpc_request *req;
204 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
205 inode->i_generation, inode);
207 /* don't do anything for / */
208 if (inode->i_sb->s_root == file->f_dentry)
/* No disposition means the lookup didn't carry an open intent (e.g.
 * NFS export or replayed open) — do the open RPC ourselves now. */
213 if (!it->d.lustre.it_disposition) {
214 struct lookup_intent oit = { .it_op = IT_OPEN,
215 .it_flags = file->f_flags };
/* NOTE(review): `oit` is built but `it` is passed to the call below in
 * the visible lines — confirm against the full source. */
217 rc = ll_intent_file_open(file, NULL, 0, it);
222 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_OPEN);
223 rc = it_open_error(DISP_OPEN_OPEN, it);
227 rc = ll_local_open(file, it);
231 ll_intent_drop_lock(it);
233 if (!S_ISREG(inode->i_mode))
/* Object creation is deferred for O_LOV_DELAY_CREATE or read-only
 * opens; it happens later via the LOV_SETSTRIPE ioctl or first write. */
238 if (file->f_flags & O_LOV_DELAY_CREATE ||
239 !(file->f_mode & FMODE_WRITE)) {
240 CDEBUG(D_INODE, "object creation was delayed\n");
244 file->f_flags &= ~O_LOV_DELAY_CREATE;
247 req = it->d.lustre.it_data;
248 ptlrpc_req_finished(req);
250 ll_open_complete(inode);
254 /* Fills the obdo with the attributes for the inode defined by lsm */
255 int ll_lsm_getattr(struct obd_export *exp, struct lov_stripe_md *lsm,
258 struct ptlrpc_request_set *set;
262 LASSERT(lsm != NULL);
/* Seed the obdo with the object id/group and ask the OSTs for the
 * size/blocks/times attributes via an async getattr on a request set. */
264 memset(oa, 0, sizeof *oa);
265 oa->o_id = lsm->lsm_object_id;
266 oa->o_gr = lsm->lsm_object_gr;
267 oa->o_mode = S_IFREG;
268 oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLSIZE |
269 OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
270 OBD_MD_FLCTIME | OBD_MD_FLGROUP;
272 set = ptlrpc_prep_set();
274 CERROR ("ENOMEM allocing request set\n");
277 rc = obd_getattr_async(exp, oa, lsm, set);
279 rc = ptlrpc_set_wait(set);
280 ptlrpc_set_destroy(set);
/* Report back only the attributes the OSTs are authoritative for. */
285 oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
286 OBD_MD_FLCTIME | OBD_MD_FLSIZE);
/* Strip setuid (and setgid when group-exec is set) from @inode on write
 * by an unprivileged caller, mirroring the kernel's remove_suid(). */
290 static inline void ll_remove_suid(struct inode *inode)
294 /* set S_IGID if S_IXGRP is set, and always set S_ISUID */
295 mode = (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) | S_ISUID;
297 /* was any of the uid bits set? */
298 mode &= inode->i_mode;
299 if (mode && !capable(CAP_FSETID)) {
300 inode->i_mode &= ~mode;
301 // XXX careful here - we cannot change the size
/* Map an OST extent @lock back to the stripe index it covers within
 * @inode's striping, by asking the LOV via obd_get_info(). */
305 static int ll_lock_to_stripe_offset(struct inode *inode, struct ldlm_lock *lock)
307 struct ll_inode_info *lli = ll_i2info(inode);
308 struct lov_stripe_md *lsm = lli->lli_smd;
309 struct obd_export *exp = ll_i2obdexp(inode);
312 struct ldlm_lock *lock;
313 struct lov_stripe_md *lsm;
314 } key = { .name = "lock_to_stripe", .lock = lock, .lsm = lsm };
315 __u32 stripe, vallen = sizeof(stripe);
/* Single-striped files trivially map to stripe 0. */
319 if (lsm->lsm_stripe_count == 1)
322 /* get our offset in the lov */
323 rc = obd_get_info(exp, sizeof(key), &key, &vallen, &stripe);
325 CERROR("obd_get_info: rc = %d\n", rc);
328 LASSERT(stripe < lsm->lsm_stripe_count);
332 /* Flush the page cache for an extent as its canceled. When we're on an LOV,
333 * we get a lock cancellation for each stripe, so we have to map the obd's
334 * region back onto the stripes in the file that it held.
336 * No one can dirty the extent until we've finished our work and they can
337 * enqueue another lock. The DLM protects us from ll_file_read/write here,
338 * but other kernel actors could have pages locked.
340 * Called with the DLM lock held. */
/* See the block comment above: write back / discard and truncate the
 * page-cache pages covered by a canceled extent lock, mapping the OST
 * extent onto file page indices through the striping parameters.
 * NOTE(review): this excerpt elides many lines of the loop body; the
 * control flow across the gaps cannot be fully reconstructed here. */
341 void ll_pgcache_remove_extent(struct inode *inode, struct lov_stripe_md *lsm,
342 struct ldlm_lock *lock, __u32 stripe)
344 ldlm_policy_data_t tmpex;
345 unsigned long start, end, count, skip, i, j;
347 int rc, rc2, discard = lock->l_flags & LDLM_FL_DISCARD_DATA;
348 struct lustre_handle lockh;
351 memcpy(&tmpex, &lock->l_policy_data, sizeof(tmpex));
352 CDEBUG(D_INODE|D_PAGE, "inode %lu(%p) ["LPU64"->"LPU64"] size: %llu\n",
353 inode->i_ino, inode, tmpex.l_extent.start, tmpex.l_extent.end,
356 /* our locks are page granular thanks to osc_enqueue, we invalidate the
358 LASSERT((tmpex.l_extent.start & ~PAGE_CACHE_MASK) == 0);
359 LASSERT(((tmpex.l_extent.end + 1) & ~PAGE_CACHE_MASK) == 0);
/* Convert the OST-object byte extent into file page indices: for a
 * striped file, each `count`-page run on this stripe is separated by
 * `skip` pages belonging to the other stripes. */
363 start = tmpex.l_extent.start >> PAGE_CACHE_SHIFT;
364 end = tmpex.l_extent.end >> PAGE_CACHE_SHIFT;
365 if (lsm->lsm_stripe_count > 1) {
366 count = lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
367 skip = (lsm->lsm_stripe_count - 1) * count;
368 start += start/count * skip + stripe * count;
370 end += end/count * skip + stripe * count;
/* Clamp against unsigned overflow of the end index. */
372 if (end < tmpex.l_extent.end >> PAGE_CACHE_SHIFT)
375 i = (inode->i_size + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
379 CDEBUG(D_INODE|D_PAGE, "walking page indices start: %lu j: %lu "
380 "count: %lu skip: %lu end: %lu%s\n", start, start % count,
381 count, skip, end, discard ? " (DISCARDING)" : "");
383 /* this is the simplistic implementation of page eviction at
384 * cancelation. It is careful to get races with other page
385 * lockers handled correctly. fixes from bug 20 will make it
386 * more efficient by associating locks with pages and with
387 * batching writeback under the lock explicitly. */
388 for (i = start, j = start % count; i <= end;
389 j++, i++, tmpex.l_extent.start += PAGE_CACHE_SIZE) {
391 CDEBUG(D_PAGE, "skip index %lu to %lu\n", i, i + skip);
397 LASSERTF(tmpex.l_extent.start< lock->l_policy_data.l_extent.end,
398 LPU64" >= "LPU64" start %lu i %lu end %lu\n",
399 tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
/* Fast exit: if the mapping has no pages on any list, nothing to do. */
402 ll_pgcache_lock(inode->i_mapping);
403 if (list_empty(&inode->i_mapping->dirty_pages) &&
404 list_empty(&inode->i_mapping->clean_pages) &&
405 list_empty(&inode->i_mapping->locked_pages)) {
406 CDEBUG(D_INODE|D_PAGE, "nothing left\n");
407 ll_pgcache_unlock(inode->i_mapping);
410 ll_pgcache_unlock(inode->i_mapping);
412 conditional_schedule();
414 page = find_get_page(inode->i_mapping, i);
417 LL_CDEBUG_PAGE(D_PAGE, page, "lock page idx %lu ext "LPU64"\n",
418 i, tmpex.l_extent.start);
421 /* page->mapping to check with racing against teardown */
422 if (page->mapping && PageDirty(page) && !discard) {
423 ClearPageDirty(page);
424 LL_CDEBUG_PAGE(D_PAGE, page, "found dirty\n");
425 ll_pgcache_lock(inode->i_mapping);
426 list_del(&page->list);
427 list_add(&page->list, &inode->i_mapping->locked_pages);
428 ll_pgcache_unlock(inode->i_mapping);
430 rc = ll_call_writepage(inode, page);
432 CERROR("writepage of page %p failed: %d\n",
434 /* either waiting for io to complete or reacquiring
435 * the lock that the failed writepage released */
/* Only truncate the page if no *other* granted lock still covers it. */
439 tmpex.l_extent.end = tmpex.l_extent.start + PAGE_CACHE_SIZE - 1;
440 /* check to see if another DLM lock covers this page */
441 ldlm_lock2handle(lock, &lockh);
442 rc2 = ldlm_lock_match(NULL,
443 LDLM_FL_BLOCK_GRANTED|LDLM_FL_CBPENDING |
445 NULL, 0, &tmpex, 0, &lockh);
446 if (rc2 == 0 && page->mapping != NULL) {
447 // checking again to account for writeback's lock_page()
448 LL_CDEBUG_PAGE(D_PAGE, page, "truncating\n");
449 ll_truncate_complete_page(page);
452 page_cache_release(page);
454 LASSERTF(tmpex.l_extent.start <=
455 (lock->l_policy_data.l_extent.end == ~0ULL ? ~0ULL :
456 lock->l_policy_data.l_extent.end + 1),
457 "loop too long "LPU64" > "LPU64" start %lu i %lu end %lu\n",
458 tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
/* DLM blocking/cancel callback for OSC extent locks.  On BLOCKING we
 * cancel our lock; on CANCELING we flush the covered page cache and
 * shrink the known minimum size (kms) for the affected stripe.
 * NOTE(review): excerpt elides the switch open, several braces and
 * RETURN paths. */
463 static int ll_extent_lock_callback(struct ldlm_lock *lock,
464 struct ldlm_lock_desc *new, void *data,
467 struct lustre_handle lockh = { 0 };
/* Guard against a bogus small-integer pointer smuggled in as data. */
471 if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
472 LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
477 case LDLM_CB_BLOCKING:
478 ldlm_lock2handle(lock, &lockh);
479 rc = ldlm_cli_cancel(&lockh);
481 CERROR("ldlm_cli_cancel failed: %d\n", rc);
483 case LDLM_CB_CANCELING: {
485 struct ll_inode_info *lli;
486 struct lov_stripe_md *lsm;
490 /* This lock wasn't granted, don't try to evict pages */
491 if (lock->l_req_mode != lock->l_granted_mode)
494 inode = ll_inode_from_lock(lock);
497 lli = ll_i2info(inode);
500 if (lli->lli_smd == NULL)
504 stripe = ll_lock_to_stripe_offset(inode, lock);
505 ll_pgcache_remove_extent(inode, lsm, lock, stripe);
/* Recompute this stripe's kms now that the lock's coverage is gone. */
508 kms = ldlm_extent_shift_kms(lock,
509 lsm->lsm_oinfo[stripe].loi_kms);
510 if (lsm->lsm_oinfo[stripe].loi_kms != kms)
511 LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
512 lsm->lsm_oinfo[stripe].loi_kms, kms);
513 lsm->lsm_oinfo[stripe].loi_kms = kms;
515 //ll_try_done_writing(inode);
/* Completion AST for client-side async extent enqueues: when granted,
 * fold the lock's LVB size into the stripe's rss/kms, wake waiters and
 * drop the PR reference taken at enqueue time. */
528 int ll_async_completion_ast(struct ldlm_lock *lock, int flags, void *data)
530 /* XXX ALLOCATE - 160 bytes */
531 struct inode *inode = ll_inode_from_lock(lock);
532 struct ll_inode_info *lli = ll_i2info(inode);
533 struct lustre_handle lockh = { 0 };
/* Blocked async locks are not expected yet — LBUG() aborts here, so the
 * debug/reprocess lines below it are currently unreachable. */
538 if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
539 LDLM_FL_BLOCK_CONV)) {
540 LBUG(); /* not expecting any blocked async locks yet */
541 LDLM_DEBUG(lock, "client-side async enqueue returned a blocked "
543 ldlm_lock_dump(D_OTHER, lock, 0);
544 ldlm_reprocess_all(lock->l_resource);
548 LDLM_DEBUG(lock, "client-side async enqueue: granted/glimpsed");
550 stripe = ll_lock_to_stripe_offset(inode, lock);
552 if (lock->l_lvb_len) {
553 struct lov_stripe_md *lsm = lli->lli_smd;
555 lvb = lock->l_lvb_data;
556 lsm->lsm_oinfo[stripe].loi_rss = lvb->lvb_size;
/* kms can only grow here: take the max of current kms and LVB size. */
559 kms = MAX(lsm->lsm_oinfo[stripe].loi_kms, lvb->lvb_size);
560 kms = ldlm_extent_shift_kms(NULL, kms);
561 if (lsm->lsm_oinfo[stripe].loi_kms != kms)
562 LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
563 lsm->lsm_oinfo[stripe].loi_kms, kms);
564 lsm->lsm_oinfo[stripe].loi_kms = kms;
569 wake_up(&lock->l_waitq);
571 ldlm_lock2handle(lock, &lockh);
572 ldlm_lock_decref(&lockh, LCK_PR);
/* Glimpse AST: another client wants our cached view of the file size.
 * Reply with the kms of the stripe covered by @lock packed in an LVB. */
577 static int ll_glimpse_callback(struct ldlm_lock *lock, void *reqp)
579 struct ptlrpc_request *req = reqp;
580 struct inode *inode = ll_inode_from_lock(lock);
581 struct ll_inode_info *lli;
583 int rc, size = sizeof(*lvb), stripe = 0;
/* No inode / no stripe data attached to the lock is a normal race;
 * answered with -ELDLM_NO_LOCK_DATA below rather than an error reply. */
587 GOTO(out, rc = -ELDLM_NO_LOCK_DATA);
588 lli = ll_i2info(inode);
590 GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
591 if (lli->lli_smd == NULL)
592 GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
594 /* First, find out which stripe index this lock corresponds to. */
595 if (lli->lli_smd->lsm_stripe_count > 1)
596 stripe = ll_lock_to_stripe_offset(inode, lock);
598 rc = lustre_pack_reply(req, 1, &size, NULL);
600 CERROR("lustre_pack_reply: %d\n", rc);
604 lvb = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*lvb));
605 lvb->lvb_size = lli->lli_smd->lsm_oinfo[stripe].loi_kms;
607 LDLM_DEBUG(lock, "i_size: %llu -> stripe number %u -> kms "LPU64,
608 inode->i_size, stripe, lvb->lvb_size);
614 /* These errors are normal races, so we don't want to fill the console
615 * with messages by calling ptlrpc_error() */
616 if (rc == -ELDLM_NO_LOCK_DATA)
617 lustre_pack_reply(req, 0, NULL, NULL);
623 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
624 __u64 lov_merge_mtime(struct lov_stripe_md *lsm, __u64 current_time);
626 /* NB: lov_merge_size will prefer locally cached writes if they extend the
627 * file (because it prefers KMS over RSS when larger) */
/* Take a short-lived PR glimpse lock over the whole object to learn the
 * current file size from the OSTs, merge it into @lvb, then cancel. */
628 int ll_glimpse_size(struct inode *inode, struct ost_lvb *lvb)
630 struct ll_inode_info *lli = ll_i2info(inode);
631 struct ll_sb_info *sbi = ll_i2sbi(inode);
632 ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
633 struct lustre_handle lockh;
634 int rc, flags = LDLM_FL_HAS_INTENT;
637 CDEBUG(D_DLMTRACE, "Glimpsing inode %lu\n", inode->i_ino);
639 rc = obd_enqueue(sbi->ll_osc_exp, lli->lli_smd, LDLM_EXTENT, &policy,
640 LCK_PR, &flags, ll_extent_lock_callback,
641 ldlm_completion_ast, ll_glimpse_callback, inode,
642 sizeof(*lvb), lustre_swab_ost_lvb, &lockh);
/* Positive rc values are DLM status codes, not errnos — map to -EIO. */
644 CERROR("obd_enqueue returned rc %d, returning -EIO\n", rc);
645 RETURN(rc > 0 ? -EIO : rc);
648 lvb->lvb_size = lov_merge_size(lli->lli_smd, 0);
649 //inode->i_mtime = lov_merge_mtime(lli->lli_smd, inode->i_mtime);
651 CDEBUG(D_DLMTRACE, "glimpse: size: "LPU64"\n", lvb->lvb_size);
653 obd_cancel(sbi->ll_osc_exp, lli->lli_smd, LCK_PR, &lockh);
/* Acquire a DLM extent lock on @lsm in @mode for the range in @policy.
 * Skipped entirely (lockless mode) for LL_FILE_IGNORE_LOCK fds or
 * LL_SBI_NOLCK mounts.  A successful full-object lock also refreshes
 * i_size from the merged stripe sizes. */
658 int ll_extent_lock(struct ll_file_data *fd, struct inode *inode,
659 struct lov_stripe_md *lsm, int mode,
660 ldlm_policy_data_t *policy, struct lustre_handle *lockh,
663 struct ll_sb_info *sbi = ll_i2sbi(inode);
667 LASSERT(lockh->cookie == 0);
669 /* XXX phil: can we do this? won't it screw the file size up? */
670 if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
671 (sbi->ll_flags & LL_SBI_NOLCK))
674 CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
675 inode->i_ino, policy->l_extent.start, policy->l_extent.end);
677 rc = obd_enqueue(sbi->ll_osc_exp, lsm, LDLM_EXTENT, policy, mode,
678 &ast_flags, ll_extent_lock_callback,
679 ldlm_completion_ast, ll_glimpse_callback, inode,
680 sizeof(struct ost_lvb), lustre_swab_ost_lvb, lockh);
/* Holding [0, EOF] means our size view is authoritative — update it. */
684 if (policy->l_extent.start == 0 &&
685 policy->l_extent.end == OBD_OBJECT_EOF)
686 inode->i_size = lov_merge_size(lsm, 1);
688 //inode->i_mtime = lov_merge_mtime(lsm, inode->i_mtime);
/* Release a DLM extent lock taken by ll_extent_lock(); mirrors its
 * no-lock short-circuit for IGNORE_LOCK fds and NOLCK mounts. */
693 int ll_extent_unlock(struct ll_file_data *fd, struct inode *inode,
694 struct lov_stripe_md *lsm, int mode,
695 struct lustre_handle *lockh)
697 struct ll_sb_info *sbi = ll_i2sbi(inode);
701 /* XXX phil: can we do this? won't it screw the file size up? */
702 if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
703 (sbi->ll_flags & LL_SBI_NOLCK))
706 rc = obd_cancel(sbi->ll_osc_exp, lsm, mode, lockh);
/* VFS ->read(): take a PR extent lock over [*ppos, *ppos+count),
 * glimpse for the real size when reading past our known minimum size,
 * then delegate to generic_file_read() with kernel readahead disabled
 * (Lustre does its own).  NOTE(review): excerpt elides lines. */
711 static ssize_t ll_file_read(struct file *filp, char *buf, size_t count,
714 struct ll_file_data *fd = filp->private_data;
715 struct inode *inode = filp->f_dentry->d_inode;
716 struct ll_inode_info *lli = ll_i2info(inode);
717 struct lov_stripe_md *lsm = lli->lli_smd;
718 struct lustre_handle lockh = { 0 };
719 ldlm_policy_data_t policy;
724 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
725 inode->i_ino, inode->i_generation, inode, count, *ppos);
727 /* "If nbyte is 0, read() will return 0 and have no other results."
728 * -- Single Unix Spec */
732 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_READ_BYTES,
738 policy.l_extent.start = *ppos;
739 policy.l_extent.end = *ppos + count - 1;
/* O_NONBLOCK readers must not sleep waiting for the DLM lock. */
741 err = ll_extent_lock(fd, inode, lsm, LCK_PR, &policy, &lockh,
742 (filp->f_flags & O_NONBLOCK)?LDLM_FL_BLOCK_NOWAIT:
747 kms = lov_merge_size(lsm, 1);
748 if (policy.l_extent.end > kms) {
749 /* A glimpse is necessary to determine whether we return a short
750 * read or some zeroes at the end of the buffer */
752 retval = ll_glimpse_size(inode, &lvb);
755 inode->i_size = lvb.lvb_size;
760 CDEBUG(D_INFO, "Read ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
761 inode->i_ino, count, *ppos, inode->i_size);
763 /* turn off the kernel's read-ahead */
764 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
767 filp->f_ra.ra_pages = 0;
769 retval = generic_file_read(filp, buf, count, ppos);
772 ll_extent_unlock(fd, inode, lsm, LCK_PR, &lockh);
777 * Write to a file (through the page cache).
/* VFS ->write(): take a PW extent lock (whole object for O_APPEND),
 * enforce the per-file maxbytes limit with SIGXFSZ, then delegate to
 * generic_file_write().  NOTE(review): excerpt elides lines. */
779 static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
782 struct ll_file_data *fd = file->private_data;
783 struct inode *inode = file->f_dentry->d_inode;
784 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
785 struct lustre_handle lockh = { 0 };
786 ldlm_policy_data_t policy;
787 loff_t maxbytes = ll_file_maxbytes(inode);
792 if (file->f_flags & O_NONBLOCK)
793 nonblock = LDLM_FL_BLOCK_NOWAIT;
794 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
795 inode->i_ino, inode->i_generation, inode, count, *ppos);
797 SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
799 /* POSIX, but surprised the VFS doesn't check this already */
803 /* If file was opened for LL_IOC_LOV_SETSTRIPE but the ioctl wasn't
804 * called on the file, don't fail the below assertion (bug 2388). */
805 if (file->f_flags & O_LOV_DELAY_CREATE && lsm == NULL)
/* O_APPEND writes can land anywhere, so lock the whole object. */
810 if (file->f_flags & O_APPEND) {
811 policy.l_extent.start = 0;
812 policy.l_extent.end = OBD_OBJECT_EOF;
814 policy.l_extent.start = *ppos;
815 policy.l_extent.end = *ppos + count - 1;
818 err = ll_extent_lock(fd, inode, lsm, LCK_PW, &policy, &lockh, nonblock);
822 /* this is ok, g_f_w will overwrite this under i_sem if it races
823 * with a local truncate, it just makes our maxbyte checking easier */
824 if (file->f_flags & O_APPEND)
825 *ppos = inode->i_size;
827 if (*ppos >= maxbytes) {
828 if (count || *ppos > maxbytes) {
829 send_sig(SIGXFSZ, current, 0);
830 GOTO(out, retval = -EFBIG);
/* Truncate the request so it ends exactly at maxbytes. */
833 if (*ppos + count > maxbytes)
834 count = maxbytes - *ppos;
836 CDEBUG(D_INFO, "Writing inode %lu, "LPSZ" bytes, offset %Lu\n",
837 inode->i_ino, count, *ppos);
839 /* generic_file_write handles O_APPEND after getting i_sem */
840 retval = generic_file_write(file, buf, count, ppos);
843 ll_extent_unlock(fd, inode, lsm, LCK_PW, &lockh);
844 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_WRITE_BYTES,
845 retval > 0 ? retval : 0);
/* LL_IOC_RECREATE_OBJ handler: re-create a lost OST object for @inode
 * (admin-only recovery path).  Copies the user's request, clones the
 * stripe md and issues obd_create() with OBD_FL_RECREATE_OBJS under
 * lli_open_sem.  NOTE(review): excerpt elides lines (error checks,
 * oa allocation, RETURN paths). */
849 static int ll_lov_recreate_obj(struct inode *inode, struct file *file,
852 struct ll_inode_info *lli = ll_i2info(inode);
853 struct obd_export *exp = ll_i2obdexp(inode);
854 struct ll_recreate_obj ucreatp;
855 struct obd_trans_info oti = { 0 };
856 struct obdo *oa = NULL;
859 struct lov_stripe_md *lsm, *lsm2;
862 if (!capable (CAP_SYS_ADMIN))
865 rc = copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
866 sizeof(struct ll_recreate_obj));
/* Serialize against other object creation on this inode. */
875 down(&lli->lli_open_sem);
878 up(&lli->lli_open_sem);
882 lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
883 (lsm->lsm_stripe_count));
885 OBD_ALLOC(lsm2, lsm_size);
887 up(&lli->lli_open_sem);
/* o_nlink carries the target OST index for the recreate request. */
892 oa->o_id = ucreatp.lrc_id;
893 oa->o_nlink = ucreatp.lrc_ost_idx;
894 oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS;
895 oa->o_flags |= OBD_FL_RECREATE_OBJS;
896 obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
897 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
899 oti.oti_objid = NULL;
900 memcpy(lsm2, lsm, lsm_size);
901 rc = obd_create(exp, oa, &lsm2, &oti);
903 up(&lli->lli_open_sem);
904 OBD_FREE(lsm2, lsm_size);
/* Push user-supplied striping (@lum) to the MDS by performing an open
 * with the EA attached on a temporary struct file, then closing it.
 * Runs under lli_open_sem and fails if stripes already exist.
 * NOTE(review): excerpt elides lines (f NULL check, error gotos). */
909 static int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
910 int flags, struct lov_user_md *lum,
913 struct ll_inode_info *lli = ll_i2info(inode);
915 struct obd_export *exp = ll_i2obdexp(inode);
916 struct lov_stripe_md *lsm;
917 struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
918 struct ptlrpc_request *req = NULL;
923 down(&lli->lli_open_sem);
926 up(&lli->lli_open_sem);
927 CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
/* Borrow dentry/vfsmnt from the caller's file on a fresh struct file
 * so the open intent can be sent without disturbing the original fd. */
932 f = get_empty_filp();
936 f->f_dentry = file->f_dentry;
937 f->f_vfsmnt = file->f_vfsmnt;
939 rc = ll_intent_file_open(f, lum, lum_size, &oit);
942 if (it_disposition(&oit, DISP_LOOKUP_NEG))
944 req = oit.d.lustre.it_data;
945 rc = oit.d.lustre.it_status;
/* Refresh the inode from the open reply so lli_smd reflects the EA. */
950 rc = mdc_req2lustre_md(req, 1, exp, NULL, &md);
953 ll_update_inode(f->f_dentry->d_inode, &md);
955 rc = ll_local_open(f, &oit);
958 ll_intent_release(&oit);
960 rc = ll_file_release(f->f_dentry->d_inode, f);
965 up(&lli->lli_open_sem);
967 ptlrpc_req_finished(req);
/* LL_IOC_LOV_SETEA handler (admin-only): copy a lov_user_md with one
 * ost_data entry from userspace and apply it via
 * ll_lov_setstripe_ea_info() with MDS_OPEN_HAS_OBJS set. */
971 static int ll_lov_setea(struct inode *inode, struct file *file,
974 int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
975 struct lov_user_md *lump;
976 int lum_size = sizeof(struct lov_user_md) +
977 sizeof(struct lov_user_ost_data);
981 if (!capable (CAP_SYS_ADMIN))
984 OBD_ALLOC(lump, lum_size);
988 rc = copy_from_user(lump, (struct lov_user_md *)arg,
/* copy_from_user failed: free the buffer before returning. */
991 OBD_FREE(lump, lum_size);
995 rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
997 OBD_FREE(lump, lum_size);
/* LL_IOC_LOV_SETSTRIPE handler: copy the user's lov_user_md (stack
 * copy — sizes asserted equal per bug 1152) and apply the striping. */
1001 static int ll_lov_setstripe(struct inode *inode, struct file *file,
1004 struct lov_user_md lum, *lump = (struct lov_user_md *)arg;
1006 int flags = FMODE_WRITE;
1009 /* Bug 1152: copy properly when this is no longer true */
1010 LASSERT(sizeof(lum) == sizeof(*lump));
1011 LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
1012 rc = copy_from_user(&lum, lump, sizeof(lum));
1016 rc = ll_lov_setstripe_ea_info(inode, file, flags, &lum, sizeof(lum));
/* LL_IOC_LOV_GETSTRIPE handler: hand the inode's stripe md to the LOV
 * iocontrol, which packs it into the user's buffer at @arg. */
1020 static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
1022 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1027 return obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2obdexp(inode), 0, lsm,
/* LL_IOC_CW_LOCK handler: take a whole-object group (CW) lock with
 * group id @arg and remember its handle in the fd; subsequent I/O on
 * this fd bypasses DLM locking (LL_FILE_IGNORE_LOCK). */
1031 static int ll_get_cwlock(struct inode *inode, struct file *file,
1034 struct ll_file_data *fd = file->private_data;
1035 ldlm_policy_data_t policy = { .l_extent = { .start = 0,
1036 .end = OBD_OBJECT_EOF}};
1037 struct lustre_handle lockh = { 0 };
1038 struct ll_inode_info *lli = ll_i2info(inode);
1039 struct lov_stripe_md *lsm = lli->lli_smd;
/* Already group-locked on this fd — elided lines presumably reject it. */
1044 if (fd->fd_flags & LL_FILE_CW_LOCKED) {
1048 policy.l_extent.gid = arg;
1049 if (file->f_flags & O_NONBLOCK)
1050 flags = LDLM_FL_BLOCK_NOWAIT;
1052 err = ll_extent_lock(fd, inode, lsm, LCK_CW, &policy, &lockh, flags);
1056 fd->fd_flags |= LL_FILE_CW_LOCKED|LL_FILE_IGNORE_LOCK;
1058 memcpy(&fd->fd_cwlockh, &lockh, sizeof(lockh));
/* LL_IOC_CW_UNLOCK handler: drop the group (CW) lock taken by
 * ll_get_cwlock(), verifying the fd is locked with the same gid. */
1063 static int ll_put_cwlock(struct inode *inode, struct file *file,
1066 struct ll_file_data *fd = file->private_data;
1067 struct ll_inode_info *lli = ll_i2info(inode);
1068 struct lov_stripe_md *lsm = lli->lli_smd;
1072 if (!(fd->fd_flags & LL_FILE_CW_LOCKED)) {
1073 /* Ugh, it's already unlocked. */
1077 if (fd->fd_gid != arg) /* Ugh? Unlocking with different gid? */
/* Clear the flags before unlocking so ll_extent_unlock isn't skipped
 * by its own LL_FILE_IGNORE_LOCK short-circuit. */
1080 fd->fd_flags &= ~(LL_FILE_CW_LOCKED|LL_FILE_IGNORE_LOCK);
1082 err = ll_extent_unlock(fd, inode, lsm, LCK_CW, &fd->fd_cwlockh);
1087 memset(&fd->fd_cwlockh, 0, sizeof(fd->fd_cwlockh));
/* VFS ->ioctl() dispatcher for Lustre files: per-fd flag get/set, LOV
 * stripe ioctls, recreate-object, ext3-compat flags and group locks;
 * unknown commands fall through to obd_iocontrol(). */
1092 int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
1095 struct ll_file_data *fd = file->private_data;
1099 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
1100 inode->i_generation, inode, cmd);
/* Let the tty layer handle its own ioctls ('T' type). */
1102 if (_IOC_TYPE(cmd) == 'T') /* tty ioctls */
1105 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_IOCTL);
1107 case LL_IOC_GETFLAGS:
1108 /* Get the current value of the file flags */
1109 return put_user(fd->fd_flags, (int *)arg);
1110 case LL_IOC_SETFLAGS:
1111 case LL_IOC_CLRFLAGS:
1112 /* Set or clear specific file flags */
1113 /* XXX This probably needs checks to ensure the flags are
1114 * not abused, and to handle any flag side effects.
1116 if (get_user(flags, (int *) arg))
1119 if (cmd == LL_IOC_SETFLAGS)
1120 fd->fd_flags |= flags;
1122 fd->fd_flags &= ~flags;
1124 case LL_IOC_LOV_SETSTRIPE:
1125 RETURN(ll_lov_setstripe(inode, file, arg));
1126 case LL_IOC_LOV_SETEA:
1127 RETURN( ll_lov_setea(inode, file, arg) );
1128 case LL_IOC_LOV_GETSTRIPE:
1129 RETURN(ll_lov_getstripe(inode, arg));
1130 case LL_IOC_RECREATE_OBJ:
1131 RETURN(ll_lov_recreate_obj(inode, file, arg));
1132 case EXT3_IOC_GETFLAGS:
1133 case EXT3_IOC_SETFLAGS:
1134 RETURN( ll_iocontrol(inode, file, cmd, arg) );
1135 case LL_IOC_CW_LOCK:
1136 RETURN(ll_get_cwlock(inode, file, arg));
1137 case LL_IOC_CW_UNLOCK:
1138 RETURN(ll_put_cwlock(inode, file, arg));
1139 /* We need to special case any other ioctls we want to handle,
1140 * to send them to the MDS/OST as appropriate and to properly
1141 * network encode the arg field.
1142 case EXT2_IOC_GETVERSION_OLD:
1143 case EXT2_IOC_GETVERSION_NEW:
1144 case EXT2_IOC_SETVERSION_OLD:
1145 case EXT2_IOC_SETVERSION_NEW:
1148 RETURN( obd_iocontrol(cmd, ll_i2obdexp(inode), 0, NULL,
/* VFS ->llseek(): SEEK_END needs the authoritative size, so take a PR
 * lock over the whole object before reading i_size.  NOTE(review):
 * excerpt elides lines (rc declarations, RETURN, some braces). */
1153 loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
1155 struct inode *inode = file->f_dentry->d_inode;
1156 struct ll_file_data *fd = file->private_data;
1157 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1158 struct lustre_handle lockh = {0};
1161 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),to=%llu\n", inode->i_ino,
1162 inode->i_generation, inode,
1163 offset + ((origin==2) ? inode->i_size : file->f_pos));
1165 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_LLSEEK);
1166 if (origin == 2) { /* SEEK_END */
1169 ldlm_policy_data_t policy = { .l_extent = {0, OBD_OBJECT_EOF }};
1171 if (file->f_flags & O_NONBLOCK)
1172 nonblock = LDLM_FL_BLOCK_NOWAIT;
1174 err = ll_extent_lock(fd, inode, lsm, LCK_PR, &policy, &lockh,
1176 if (err != ELDLM_OK)
/* i_size is valid while the [0, EOF] PR lock is held. */
1179 offset += inode->i_size;
1180 } else if (origin == 1) { /* SEEK_CUR */
1181 offset += file->f_pos;
1185 if (offset >= 0 && offset <= ll_file_maxbytes(inode)) {
1186 if (offset != file->f_pos) {
1187 file->f_pos = offset;
1188 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* 2.4 kernels invalidate cached readdir/seek state via f_version. */
1190 file->f_version = ++event;
1197 ll_extent_unlock(fd, inode, lsm, LCK_PR, &lockh);
/* VFS ->fsync(): wait for in-flight page I/O, sync metadata through the
 * MDC (md_sync), then sync data objects through the OSC (obd_sync).
 * NOTE(review): excerpt elides lines (rc/err merging, RETURN). */
1201 int ll_fsync(struct file *file, struct dentry *dentry, int data)
1203 struct inode *inode = dentry->d_inode;
1204 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1206 struct ptlrpc_request *req;
1209 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1210 inode->i_generation, inode);
1212 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_FSYNC);
1214 /* fsync's caller has already called _fdata{sync,write}, we want
1215 * that IO to finish before calling the osc and mdc sync methods */
1216 rc = filemap_fdatawait(inode->i_mapping);
1218 ll_inode2fid(&fid, inode);
1219 err = md_sync(ll_i2sbi(inode)->ll_mdc_exp, &fid, &req);
1223 ptlrpc_req_finished(req);
/* Data sync is only needed when the file has OST objects (lsm). */
1226 struct obdo *oa = obdo_alloc();
1229 RETURN(rc ? rc : -ENOMEM);
1231 oa->o_id = lsm->lsm_object_id;
1232 oa->o_gr = lsm->lsm_object_gr;
1233 oa->o_valid = OBD_MD_FLID;
1234 obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
1235 OBD_MD_FLMTIME | OBD_MD_FLCTIME |
1238 err = obd_sync(ll_i2sbi(inode)->ll_osc_exp, oa, lsm,
/* VFS ->lock() (fcntl advisory locks): translate the kernel file_lock
 * into an LDLM_FLOCK enqueue against the MDS namespace; F_GETLK is
 * mapped to a TEST_LOCK enqueue.  NOTE(review): excerpt elides the
 * fl_type/cmd switch bodies that set `mode` and `flags`. */
1248 int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
1250 struct inode *inode = file->f_dentry->d_inode;
1251 struct ll_sb_info *sbi = ll_i2sbi(inode);
1252 struct obd_device *obddev;
1253 struct ldlm_res_id res_id =
1254 { .name = {inode->i_ino, inode->i_generation, LDLM_FLOCK} };
1255 struct lustre_handle lockh = {0};
1256 ldlm_policy_data_t flock;
1257 ldlm_mode_t mode = 0;
1262 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
1263 inode->i_ino, file_lock);
1265 flock.l_flock.pid = file_lock->fl_pid;
1266 flock.l_flock.start = file_lock->fl_start;
1267 flock.l_flock.end = file_lock->fl_end;
1269 switch (file_lock->fl_type) {
1274 /* An unlock request may or may not have any relation to
1275 * existing locks so we may not be able to pass a lock handle
1276 * via a normal ldlm_lock_cancel() request. The request may even
1277 * unlock a byte range in the middle of an existing lock. In
1278 * order to process an unlock request we need all of the same
1279 * information that is given with a normal read or write record
1280 * lock request. To avoid creating another ldlm unlock (cancel)
1281 * message we'll treat a LCK_NL flock request as an unlock. */
1288 CERROR("unknown fcntl lock type: %d\n", file_lock->fl_type);
/* Non-blocking request (F_SETLK-style): fail instead of waiting. */
1297 flags = LDLM_FL_BLOCK_NOWAIT;
1300 flags = LDLM_FL_TEST_LOCK;
1301 /* Save the old mode so that if the mode in the lock changes we
1302 * can decrement the appropriate reader or writer refcount. */
1303 file_lock->fl_type = mode;
1306 CERROR("unknown fcntl lock command: %d\n", cmd);
1310 CDEBUG(D_DLMTRACE, "inode=%lu, pid="LPU64", flags=%#x, mode=%u, "
1311 "start="LPU64", end="LPU64"\n", inode->i_ino, flock.l_flock.pid,
1312 flags, mode, flock.l_flock.start, flock.l_flock.end);
/* flock locks live in the MDS namespace; resolve it via the MDC. */
1314 obddev = md_get_real_obd(sbi->ll_mdc_exp, NULL, 0);
1315 rc = ldlm_cli_enqueue(sbi->ll_mdc_exp, NULL, obddev->obd_namespace,
1316 res_id, LDLM_FLOCK, &flock, mode, &flags,
1317 NULL, ldlm_flock_completion_ast, NULL, file_lock,
1318 NULL, 0, NULL, &lockh);
/* ll_inode_revalidate_it(): refresh cached inode attributes.
 *
 * If the MDC no longer vouches for the cached attributes, fetch them from
 * the MDS with md_getattr() (requesting the striping EA for regular files)
 * and update the inode via ll_prep_inode().  Finally, for files with an
 * allocated object, glimpse the current size from the OSTs.
 * NOTE(review): sampled excerpt -- braces/returns are elided; comments
 * cover only the visible statements. */
1322 int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it)
1324         struct inode *inode = dentry->d_inode;
1325         struct ll_inode_info *lli;
1326         struct lov_stripe_md *lsm;
             /* Unexpected state (condition elided here) -- debugging aid only. */
1332                 CERROR("REPORT THIS LINE TO PETER\n");
1335         ll_inode2fid(&fid, inode);
1336         lli = ll_i2info(inode);
1337         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s,intent=%s\n",
1338                inode->i_ino, inode->i_generation, inode, dentry->d_name.name,
1340 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0))
1341         lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_REVALIDATE);
             /* Attributes stale according to the MDC: re-fetch from the MDS. */
1344         if (!md_valid_attrs(ll_i2mdcexp(inode), &fid)) {
1345                 struct ptlrpc_request *req = NULL;
1346                 struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
1348                 unsigned long valid = 0;
                     /* Regular files: also request the striping EA, sized by
                      * the OSC's on-disk metadata size. */
1351                 if (S_ISREG(inode->i_mode)) {
1352                         ealen = obd_size_diskmd(sbi->ll_osc_exp, NULL);
1353                         valid |= OBD_MD_FLEASIZE;
1355                 ll_inode2fid(&fid, inode);
1356                 rc = md_getattr(sbi->ll_mdc_exp, &fid, valid, ealen, &req);
1358                         CERROR("failure %d inode %lu\n", rc, inode->i_ino);
                     /* Fold the getattr reply back into the VFS inode. */
1361                 rc = ll_prep_inode(sbi->ll_osc_exp, sbi->ll_mdc_exp,
1362                                    &inode, req, 0, NULL);
1364                         ptlrpc_req_finished(req);
1367                 ptlrpc_req_finished(req);
1371         if (lsm == NULL) /* object not yet allocated, don't validate size */
1374         /* ll_glimpse_size will prefer locally cached writes if they extend
             /* Update i_size from the glimpsed lock-value block. */
1379         rc = ll_glimpse_size(inode, &lvb);
1380         inode->i_size = lvb.lvb_size;
/* ll_getattr(): 2.6-kernel ->getattr_it handler.
 *
 * Revalidates the inode with the intent (so attributes are current on the
 * wire) and then copies the VFS inode fields into the kstat the caller
 * supplied.  NOTE(review): sampled excerpt -- error handling between the
 * revalidate call and the stat copy is elided. */
1385 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1386 int ll_getattr(struct vfsmount *mnt, struct dentry *de,
1387                struct lookup_intent *it, struct kstat *stat)
1390         struct inode *inode = de->d_inode;
             /* Refresh attributes before reporting them. */
1392         res = ll_inode_revalidate_it(de, it);
1393         lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_GETATTR);
             /* Straight field-for-field copy from the (now revalidated) inode. */
1398         stat->dev = inode->i_sb->s_dev;
1399         stat->ino = inode->i_ino;
1400         stat->mode = inode->i_mode;
1401         stat->nlink = inode->i_nlink;
1402         stat->uid = inode->i_uid;
1403         stat->gid = inode->i_gid;
1404         stat->rdev = kdev_t_to_nr(inode->i_rdev);
1405         stat->atime = inode->i_atime;
1406         stat->mtime = inode->i_mtime;
1407         stat->ctime = inode->i_ctime;
1408         stat->size = inode->i_size;
1409         stat->blksize = inode->i_blksize;
1410         stat->blocks = inode->i_blocks;
/* File operations vector for regular Lustre files (uses GNU labeled-
 * element initializer syntax).  NOTE(review): sampled excerpt -- some
 * members (e.g. read/open, original lines 1416/1419) are elided. */
1415 struct file_operations ll_file_operations = {
1417         write: ll_file_write,
1418         ioctl: ll_file_ioctl,
1420         release: ll_file_release,
1421         mmap: generic_file_mmap,
1422         llseek: ll_file_seek,
             /* ->lock hookup intentionally disabled; ll_file_flock exists
              * but is not wired in here. */
1424         //lock: ll_file_flock
/* Inode operations for regular Lustre files.  On 2.6 kernels attributes
 * go through getattr_it/ll_getattr; the matching pre-2.6 branch (original
 * line 1433 area, including the #else/#endif) is elided in this excerpt. */
1427 struct inode_operations ll_file_inode_operations = {
1428         setattr_raw: ll_setattr_raw,
1429         setattr: ll_setattr,
1430         truncate: ll_truncate,
1431 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1432         getattr_it: ll_getattr,
1434         revalidate_it: ll_inode_revalidate_it,
1438 struct inode_operations ll_special_inode_operations = {
1439 setattr_raw: ll_setattr_raw,
1440 setattr: ll_setattr,
1441 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1442 getattr_it: ll_getattr,
1444 revalidate_it: ll_inode_revalidate_it,