1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Lustre Light Super operations
6 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
24 #define DEBUG_SUBSYSTEM S_LLITE
30 #include <sys/types.h>
31 #include <sys/queue.h>
39 #include "llite_lib.h"
/*
 * Drop cached pages of @inode that are covered by the extent of @lock,
 * which is being cancelled.  When the file is striped over several OSTs
 * (lsm_stripe_count > 1) the lock's [start,end] byte extent is first
 * mapped from per-stripe offsets to file-logical page indices via the
 * "lock_to_stripe" obd_get_info() query.
 *
 * NOTE(review): this chunk is a lossy extraction -- source lines are
 * missing, so braces and statements do not balance as shown.
 */
42 void llu_pgcache_remove_extent(struct inode *inode, struct lov_stripe_md *lsm,
43 struct ldlm_lock *lock)
45 clear_bit(LLI_F_HAVE_SIZE_LOCK, &(llu_i2info(inode)->lli_flags));
47 struct ldlm_extent *extent = &lock->l_extent;
48 unsigned long start, end, count, skip, i, j;
53 CDEBUG(D_INODE, "obdo %lu inode %p ["LPU64"->"LPU64"] size: %llu\n",
54 inode->i_ino, inode, extent->start, extent->end, inode->i_size);
/* convert the byte extent to an inclusive-start/exclusive-end page range */
56 start = extent->start >> PAGE_CACHE_SHIFT;
59 end = (extent->end >> PAGE_CACHE_SHIFT) + 1;
60 if ((end << PAGE_CACHE_SHIFT) < extent->end)
62 if (lsm->lsm_stripe_count > 1) {
65 struct ldlm_lock *lock;
66 struct lov_stripe_md *lsm;
67 } key = { .name = "lock_to_stripe", .lock = lock, .lsm = lsm };
69 __u32 vallen = sizeof(stripe);
72 /* get our offset in the lov */
/* NOTE(review): ll_i2obdconn() looks like an llite remnant -- the rest
 * of this file uses llu_* accessors (e.g. llu_i2obdexp).  Verify. */
73 rc = obd_get_info(ll_i2obdconn(inode), sizeof(key),
74 &key, &vallen, &stripe);
76 CERROR("obd_get_info: rc = %d\n", rc);
79 LASSERT(stripe < lsm->lsm_stripe_count);
/* map stripe-relative page indices to file-logical page indices:
 * every 'count' pages of this stripe, 'skip' pages of other stripes
 * intervene, and this stripe starts 'stripe * count' pages in */
81 count = lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
82 skip = (lsm->lsm_stripe_count - 1) * count;
83 start += (start/count * skip) + (stripe * count);
85 end += (end/count * skip) + (stripe * count);
88 i = (inode->i_size + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
/* NOTE(review): ll_i2info() here vs. llu_i2info() above -- another
 * apparent llite-port inconsistency; confirm which helper exists. */
90 clear_bit(LLI_F_HAVE_SIZE_LOCK, &(ll_i2info(inode)->lli_flags));
94 CDEBUG(D_INODE, "start: %lu j: %lu count: %lu skip: %lu end: %lu\n",
95 start, start % count, count, skip, end);
97 /* start writeback on dirty pages in the extent when its PW */
98 for (i = start, j = start % count;
99 lock->l_granted_mode == LCK_PW && i < end; j++, i++) {
104 /* its unlikely, but give us a chance to bail when we're out */
105 PGCACHE_WRLOCK(inode->i_mapping);
106 if (list_empty(&inode->i_mapping->dirty_pages)) {
107 CDEBUG(D_INODE, "dirty list empty\n");
108 PGCACHE_WRUNLOCK(inode->i_mapping);
111 PGCACHE_WRUNLOCK(inode->i_mapping);
116 /* always do a getattr for the first person to pop out of lock
117 * acquisition.. the DID_GETATTR flag and semaphore serialize
118 * this initial race. we used to make a decision based on whether
119 * the lock was matched or acquired, but the matcher could win the
120 * waking race with the first issuer so that was no good..
122 if (test_bit(LLI_F_DID_GETATTR, &lli->lli_flags))
125 down(&lli->lli_getattr_sem);
/* re-test under the semaphore: only the first racer does the getattr */
127 if (!test_bit(LLI_F_DID_GETATTR, &lli->lli_flags)) {
128 rc = ll_inode_getattr(inode, lsm);
130 set_bit(LLI_F_DID_GETATTR, &lli->lli_flags);
134 page_cache_release(page);
138 /* our locks are page granular thanks to osc_enqueue, we invalidate the
140 LASSERT((extent->start & ~PAGE_CACHE_MASK) == 0);
141 LASSERT(((extent->end+1) & ~PAGE_CACHE_MASK) == 0);
/* second pass: actually drop every page in the (stripe-mapped) range */
142 for (i = start, j = start % count ; i < end ; j++, i++) {
147 PGCACHE_WRLOCK(inode->i_mapping);
148 if (list_empty(&inode->i_mapping->dirty_pages) &&
149 list_empty(&inode->i_mapping->clean_pages) &&
150 list_empty(&inode->i_mapping->locked_pages)) {
151 CDEBUG(D_INODE, "nothing left\n");
152 PGCACHE_WRUNLOCK(inode->i_mapping);
155 PGCACHE_WRUNLOCK(inode->i_mapping);
158 page = find_get_page(inode->i_mapping, i);
161 CDEBUG(D_INODE, "dropping page %p at %lu\n", page, page->index);
163 if (page->mapping) /* might have raced */
164 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
165 truncate_complete_page(page);
167 truncate_complete_page(page->mapping, page);
/* drop the reference taken by find_get_page() */
170 page_cache_release(page);
/*
 * LDLM callback for (apparently file-metadata) locks on @data's inode.
 * LDLM_CB_BLOCKING: another client wants the lock -- cancel ours.
 * LDLM_CB_CANCELING: the lock is going away -- tear down the cached
 * pages it covered.
 *
 * NOTE(review): lossy extraction -- the switch statement head and the
 * function's return are on missing lines.
 */
176 int llu_lock_callback(struct ldlm_lock *lock, struct ldlm_lock_desc *new,
177 void *data, int flag)
179 struct inode *inode = data;
180 struct llu_inode_info *lli = llu_i2info(inode);
181 struct lustre_handle lockh = {0};
189 case LDLM_CB_BLOCKING:
190 ldlm_lock2handle(lock, &lockh);
191 rc = ldlm_cli_cancel(&lockh);
193 CERROR("ldlm_cli_cancel failed: %d\n", rc);
195 case LDLM_CB_CANCELING: {
196 /* FIXME: we could be given 'canceling intents' so that we
197 * could know to write-back or simply throw away the pages
198 * based on if the cancel comes from a desire to, say,
199 * read or truncate.. */
200 llu_pgcache_remove_extent(inode, lli->lli_smd, lock);
/*
 * LDLM callback for OSC extent locks.  Unlike llu_lock_callback() the
 * inode is recovered from the lock itself (llu_inode_from_lock) rather
 * than from @data, and obviously-bogus @data pointers are reported.
 *
 * Fixes vs. original: the CANCELING branch called
 * ll_pgcache_remove_extent(), an llite symbol not defined in this file;
 * this file's helper is llu_pgcache_remove_extent() (defined above).
 * Also normalized 'lli=' spacing.
 *
 * NOTE(review): lossy extraction -- switch head, braces and return are
 * on missing lines.
 */
211 static int llu_extent_lock_callback(struct ldlm_lock *lock,
212 struct ldlm_lock_desc *new, void *data,
215 struct lustre_handle lockh = { 0 };
/* tiny non-NULL values can't be valid pointers; flag corrupted data */
220 if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
221 LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
226 case LDLM_CB_BLOCKING:
227 ldlm_lock2handle(lock, &lockh);
228 rc = ldlm_cli_cancel(&lockh);
230 CERROR("ldlm_cli_cancel failed: %d\n", rc);
232 case LDLM_CB_CANCELING: {
233 struct inode *inode = llu_inode_from_lock(lock);
234 struct llu_inode_info *lli;
238 lli = llu_i2info(inode);
249 llu_pgcache_remove_extent(inode, lli->lli_smd, lock);
/*
 * Take an extent lock on @lsm covering @extent with @mode, without the
 * file-size validation that llu_extent_lock() layers on top.  No-op
 * (lock elision) when the fd or superblock asks to ignore locking.
 *
 * NOTE(review): lossy extraction -- parameter list, braces and RETURN
 * are partially on missing lines.
 */
262 int llu_extent_lock_no_validate(struct ll_file_data *fd,
264 struct lov_stripe_md *lsm,
266 struct ldlm_extent *extent,
267 struct lustre_handle *lockh,
270 struct llu_sb_info *sbi = llu_i2sbi(inode);
271 struct llu_inode_info *lli = llu_i2info(inode);
/* caller must hand in an unused handle */
275 LASSERT(lockh->cookie == 0);
278 /* XXX phil: can we do this? won't it screw the file size up? */
279 if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
280 (sbi->ll_flags & LL_SBI_NOLCK))
284 CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
285 lli->lli_st_ino, extent->start, extent->end);
/* NOTE(review): sizeof(extent) is the size of the pointer, not of
 * *extent -- looks suspicious; confirm against obd_enqueue()'s
 * expectations before changing. */
287 rc = obd_enqueue(sbi->ll_osc_exp, lsm, NULL, LDLM_EXTENT, extent,
288 sizeof(extent), mode, &ast_flags,
289 llu_extent_lock_callback, inode, lockh);
295 * this grabs a lock and manually implements behaviour that makes it look like
296 * the OST is returning the file size with each lock acquisition.
/*
 * Take an extent lock and additionally make sure the cached file size
 * is valid: after the plain lock is granted, do a getattr (unless the
 * HAVE_OST_SIZE_LOCK bit says the size is already pinned) and then try
 * to obd_match() a PR lock covering [size, EOF]; holding such a lock
 * means the size cannot change underneath us.
 *
 * NOTE(review): lossy extraction -- error paths, the match_lockh use
 * and RETURN are partially on missing lines.
 */
298 int llu_extent_lock(struct ll_file_data *fd, struct inode *inode,
299 struct lov_stripe_md *lsm, int mode,
300 struct ldlm_extent *extent, struct lustre_handle *lockh)
302 struct llu_inode_info *lli = llu_i2info(inode);
303 struct obd_export *exp = llu_i2obdexp(inode);
304 struct ldlm_extent size_lock;
305 struct lustre_handle match_lockh = {0};
306 int flags, rc, matched;
309 rc = llu_extent_lock_no_validate(fd, inode, lsm, mode, extent, lockh, 0);
/* size already known-valid: nothing more to do */
313 if (test_bit(LLI_F_HAVE_OST_SIZE_LOCK, &lli->lli_flags))
316 rc = llu_inode_getattr(inode, lsm);
/* getattr failed: drop the lock we just took before erroring out */
318 llu_extent_unlock(fd, inode, lsm, mode, lockh);
/* probe for an existing PR lock covering everything past the size */
322 size_lock.start = lli->lli_st_size;
323 size_lock.end = OBD_OBJECT_EOF;
325 /* XXX I bet we should be checking the lock ignore flags.. */
326 flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED;
327 matched = obd_match(exp, lsm, LDLM_EXTENT, &size_lock,
328 sizeof(size_lock), LCK_PR, &flags, inode,
331 /* hey, alright, we hold a size lock that covers the size we
332 * just found, its not going to change for a while.. */
334 set_bit(LLI_F_HAVE_OST_SIZE_LOCK, &lli->lli_flags);
/* the match took a reference; we only needed the existence check */
335 obd_cancel(exp, lsm, LCK_PR, &match_lockh);
/*
 * Release an extent lock taken by llu_extent_lock*().  Mirrors the
 * lock-elision check in llu_extent_lock_no_validate(): if locking was
 * skipped on acquire it is skipped here too.
 *
 * NOTE(review): lossy extraction -- braces and RETURN(rc) are on
 * missing lines.
 */
341 int llu_extent_unlock(struct ll_file_data *fd, struct inode *inode,
342 struct lov_stripe_md *lsm, int mode,
343 struct lustre_handle *lockh)
345 struct llu_sb_info *sbi = llu_i2sbi(inode);
349 /* XXX phil: can we do this? won't it screw the file size up? */
350 if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
351 (sbi->ll_flags & LL_SBI_NOLCK))
354 rc = obd_cancel(sbi->ll_osc_exp, lsm, mode, lockh);
/* magic value stored in each ll_async_page so stray cookies can be
 * detected (see llap_from_cookie()) */
359 #define LLAP_MAGIC 12346789
/* per-page bookkeeping for async OBD I/O; one of these sits behind each
 * page handed to obd_prep_async_page().
 * NOTE(review): lossy extraction -- llap_magic/llap_cookie/llap_queued
 * fields referenced elsewhere in this file are on missing lines. */
361 struct ll_async_page {
365 struct page *llap_page;
366 struct inode *llap_inode;
/* Recover the ll_async_page from an opaque OBD cookie, validating the
 * magic; returns ERR_PTR(-EINVAL) on a corrupted/foreign cookie.
 * NOTE(review): the success-path 'return llap;' is on a missing line. */
369 static struct ll_async_page *llap_from_cookie(void *cookie)
371 struct ll_async_page *llap = cookie;
372 if (llap->llap_magic != LLAP_MAGIC)
373 return ERR_PTR(-EINVAL);
/*
 * ap_fill_obdo async-page callback: populate @oa with the object id and
 * the inode attributes relevant for this I/O (mtime/ctime only matter
 * for writes).
 */
377 static void llu_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
379 struct ll_async_page *llap;
381 struct lov_stripe_md *lsm;
382 obd_flag valid_flags;
385 llap = llap_from_cookie(data);
391 inode = llap->llap_inode;
392 lsm = llu_i2info(inode)->lli_smd;
394 oa->o_id = lsm->lsm_object_id;
395 oa->o_valid = OBD_MD_FLID;
396 valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
397 if (cmd == OBD_BRW_WRITE)
398 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
/* copy the selected attributes from the inode into the obdo */
400 obdo_from_inode(oa, inode, valid_flags);
404 /* called for each page in a completed rpc.*/
/* ap_completion async-page callback: clear the queued flag and log
 * write errors (rc handling for the success path is on missing lines) */
405 static void llu_ap_completion(void *data, int cmd, int rc)
407 struct ll_async_page *llap;
410 llap = llap_from_cookie(data);
416 llap->llap_queued = 0;
417 page = llap->llap_page;
420 if (cmd == OBD_BRW_WRITE)
421 CERROR("writeback error on page %p index %ld: %d\n",
422 page, page->index, rc);
/* async-page callback table handed to obd_prep_async_page(); liblustre
 * needs no make_ready/refresh_count since pages are caller-supplied */
427 static struct obd_async_page_ops llu_async_page_ops = {
428 .ap_make_ready = NULL,
429 .ap_refresh_count = NULL,
430 .ap_fill_obdo = llu_ap_fill_obdo,
431 .ap_completion = llu_ap_completion,
/*
 * Allocate one llu_sysio_cookie sized for @npages: the llap array and
 * page array live in the same allocation, immediately after the cookie
 * struct.  Also initializes the osic sync-I/O container.
 * NOTE(review): NULL-check of OBD_ALLOC, the I_REF on the inode, and
 * the return are on missing lines.
 */
435 struct llu_sysio_cookie* get_sysio_cookie(struct inode *inode, int npages)
437 struct llu_sysio_cookie *cookie;
439 OBD_ALLOC(cookie, LLU_SYSIO_COOKIE_SIZE(npages));
442 cookie->lsc_inode = inode;
443 cookie->lsc_npages = npages;
/* carve the trailing space: [cookie][npages llaps][npages pages] */
444 cookie->lsc_llap = (struct ll_async_page *)(cookie + 1);
445 cookie->lsc_pages = (struct page *) (cookie->lsc_llap + npages);
447 osic_init(&cookie->lsc_osic);
/*
 * Tear down a cookie from get_sysio_cookie(): undo every successful
 * obd_prep_async_page() (llap_cookie non-NULL marks prepared entries),
 * drop the inode reference, release the osic and free the single
 * allocation that held cookie + llaps + pages.
 */
454 void put_sysio_cookie(struct llu_sysio_cookie *cookie)
456 struct lov_stripe_md *lsm = llu_i2info(cookie->lsc_inode)->lli_smd;
457 struct obd_export *exp = llu_i2obdexp(cookie->lsc_inode);
458 struct ll_async_page *llap = cookie->lsc_llap;
461 for (i = 0; i< cookie->lsc_npages; i++) {
462 if (llap[i].llap_cookie)
463 obd_teardown_async_page(exp, lsm, NULL,
464 llap[i].llap_cookie);
467 I_RELE(cookie->lsc_inode);
469 osic_release(cookie->lsc_osic);
470 OBD_FREE(cookie, LLU_SYSIO_COOKIE_SIZE(cookie->lsc_npages));
/*
 * Build the page/llap arrays for a contiguous user buffer [pos,
 * pos+count) and queue each page for sync I/O under the cookie's osic.
 * Pages map the user buffer directly (zero-copy): page addr is the
 * buffer rounded down to a page boundary, with _offset/_count marking
 * the valid range within the page.
 *
 * NOTE(review): lossy extraction -- the loop head of the "prepare"
 * pass, the npages++ advance, and the error returns are on missing
 * lines.
 */
474 int llu_prep_async_io(struct llu_sysio_cookie *cookie, int cmd,
475 char *buf, loff_t pos, size_t count)
477 struct lov_stripe_md *lsm = llu_i2info(cookie->lsc_inode)->lli_smd;
478 struct obd_export *exp = llu_i2obdexp(cookie->lsc_inode);
479 struct page *pages = cookie->lsc_pages;
480 struct ll_async_page *llap = cookie->lsc_llap;
481 int i, rc, npages = 0;
487 cookie->lsc_rwcount = count;
489 /* prepare the pages array */
491 unsigned long index, offset, bytes;
493 offset = (pos & ~PAGE_CACHE_MASK);
494 index = pos >> PAGE_CACHE_SHIFT;
495 bytes = PAGE_CACHE_SIZE - offset;
499 /* prepare page for this index */
500 pages[npages].index = index;
/* point the page at the page-aligned start of the user buffer */
501 pages[npages].addr = buf - offset;
503 pages[npages]._offset = offset;
504 pages[npages]._count = bytes;
/* second pass: register each page with the OSC and queue the I/O */
512 for (i = 0; i < npages; i++) {
513 llap[i].llap_magic = LLAP_MAGIC;
/* NOTE(review): PAGE_SHIFT here vs. PAGE_CACHE_SHIFT used when the
 * index was computed above -- confirm these are the same width in
 * this build before touching. */
514 rc = obd_prep_async_page(exp, lsm, NULL, &pages[i],
515 (obd_off)pages[i].index << PAGE_SHIFT,
517 &llap[i], &llap[i].llap_cookie);
/* on failure, make sure put_sysio_cookie() won't tear this one down */
519 llap[i].llap_cookie = NULL;
522 CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n",
523 &llap[i], &pages[i], llap[i].llap_cookie,
524 (obd_off)pages[i].index << PAGE_SHIFT);
525 pages[i].private = (unsigned long)&llap[i];
526 llap[i].llap_page = &pages[i];
527 llap[i].llap_inode = cookie->lsc_inode;
529 rc = obd_queue_sync_io(exp, lsm, NULL, cookie->lsc_osic,
530 llap[i].llap_cookie, cmd,
531 pages[i]._offset, pages[i]._count, 0);
535 llap[i].llap_queued = 1;
/* Kick off the I/O previously queued on the cookie's osic by
 * llu_prep_async_io(); returns the obd_trigger_sync_io() result. */
542 int llu_start_async_io(struct llu_sysio_cookie *cookie)
544 struct lov_stripe_md *lsm = llu_i2info(cookie->lsc_inode)->lli_smd;
545 struct obd_export *exp = llu_i2obdexp(cookie->lsc_inode);
547 return obd_trigger_sync_io(exp, lsm, NULL, cookie->lsc_osic);
551 * read/write a continuous buffer for an inode (zero-copy)
553 struct llu_sysio_cookie*
/*
 * Perform a zero-copy read or write of a contiguous buffer: allocate a
 * cookie sized for the worst-case page count (count/PAGE + 2 for the
 * unaligned head and tail), prep, start, and wait for the I/O.
 * Returns the cookie (caller releases via put_sysio_cookie) or an
 * ERR_PTR.
 */
554 llu_rw(int cmd, struct inode *inode, char *buf, size_t count, loff_t pos)
556 struct llu_sysio_cookie *cookie;
560 max_pages = (count >> PAGE_SHIFT) + 2;
562 cookie = get_sysio_cookie(inode, max_pages);
564 RETURN(ERR_PTR(-ENOMEM));
566 rc = llu_prep_async_io(cookie, cmd, buf, pos, count);
568 GOTO(out_cleanup, rc);
570 rc = llu_start_async_io(cookie);
572 GOTO(out_cleanup, rc);
/* NOTE(review): '&osic' references a local not visible in this chunk;
 * other callers wait on cookie->lsc_osic (see llu_iop_iodone) --
 * confirm against the full source. */
575 rc = osic_wait(&osic);
577 CERROR("file i/o error!\n");
584 put_sysio_cookie(cookie);
588 struct llu_sysio_callback_args*
/*
 * Write each iovec segment under its own PW extent lock, collecting
 * the per-segment llu_sysio_cookies into an lsca for completion in
 * llu_iop_iodone().  On any failure all cookies accumulated so far are
 * torn down and an ERR_PTR is returned.
 *
 * NOTE(review): lossy extraction -- loop braces, pos advancement and
 * the success return are on missing lines.
 */
589 llu_file_write(struct inode *inode, const struct iovec *iovec,
590 size_t iovlen, loff_t pos)
592 struct llu_inode_info *lli = llu_i2info(inode);
593 struct ll_file_data *fd = lli->lli_file_data;
594 struct lustre_handle lockh = {0};
595 struct lov_stripe_md *lsm = lli->lli_smd;
596 struct llu_sysio_callback_args *lsca;
597 struct llu_sysio_cookie *cookie;
598 struct ldlm_extent extent;
603 /* XXX consider other types later */
604 if (!S_ISREG(lli->lli_st_mode))
607 LASSERT(iovlen <= MAX_IOVEC);
609 OBD_ALLOC(lsca, sizeof(*lsca));
611 RETURN(ERR_PTR(-ENOMEM));
613 /* FIXME optimize the following extent locking */
614 for (iovidx = 0; iovidx < iovlen; iovidx++) {
615 char *buf = iovec[iovidx].iov_base;
616 size_t count = iovec[iovidx].iov_len;
621 /* FIXME libsysio haven't consider the open flags
622 * such as O_APPEND */
/* NOTE(review): 'file' and 'ppos' are not in scope in this function --
 * this branch looks like dead llite-ported code guarded by the FIXME
 * above; confirm whether it is compiled out in the full source. */
624 if (!S_ISBLK(lli->lli_st_mode) && file->f_flags & O_APPEND) {
626 extent.end = OBD_OBJECT_EOF;
628 extent.start = *ppos;
629 extent.end = *ppos + count - 1;
633 extent.end = pos + count - 1;
/* lock exactly the byte range this segment will write */
636 err = llu_extent_lock(fd, inode, lsm, LCK_PW, &extent, &lockh);
638 GOTO(err_out, err = -ENOLCK);
640 CDEBUG(D_INFO, "Writing inode %lu, "LPSZ" bytes, offset %Lu\n",
641 lli->lli_st_ino, count, pos);
643 cookie = llu_rw(OBD_BRW_WRITE, inode, buf, count, pos);
644 if (!IS_ERR(cookie)) {
645 lsca->cookies[lsca->ncookies++] = cookie;
648 /* file size grow. XXX should be done here? */
649 if (pos > lli->lli_st_size) {
650 lli->lli_st_size = pos;
651 set_bit(LLI_F_PREFER_EXTENDED_SIZE,
/* llu_rw failed: drop the lock and unwind everything */
655 llu_extent_unlock(fd, inode, lsm, LCK_PW, &lockh);
656 GOTO(err_out, err = PTR_ERR(cookie));
660 err = llu_extent_unlock(fd, inode, lsm, LCK_PW, &lockh);
662 CERROR("extent unlock error %d\n", err);
668 /* teardown all async stuff */
669 while (lsca->ncookies--) {
670 put_sysio_cookie(lsca->cookies[lsca->ncookies]);
672 OBD_FREE(lsca, sizeof(*lsca));
674 RETURN(ERR_PTR(err));
/*
 * Propagate an atime update after a read, unless the atime is already
 * current or the inode is read-only / noatime.
 */
678 static void llu_update_atime(struct inode *inode)
680 struct llu_inode_info *lli = llu_i2info(inode);
685 attr.ia_atime = LTIME_S(CURRENT_TIME);
686 attr.ia_valid = ATTR_ATIME;
688 if (lli->lli_st_atime == attr.ia_atime) return;
689 if (IS_RDONLY(inode)) return;
690 if (IS_NOATIME(inode)) return;
692 /* ll_inode_setattr() sets inode->i_atime from attr.ia_atime */
693 llu_inode_setattr(inode, &attr, 0);
695 /* update atime, but don't explicitly write it out just this change */
696 inode->i_atime = CURRENT_TIME;
701 struct llu_sysio_callback_args*
/*
 * Read each iovec segment under its own PR extent lock; mirrors
 * llu_file_write() but with LCK_PR and an atime update on success.
 * Cookies are collected into an lsca for llu_iop_iodone().
 *
 * NOTE(review): lossy extraction -- loop braces, pos advancement,
 * zero-length-segment skip and the success return are on missing
 * lines.
 */
702 llu_file_read(struct inode *inode, const struct iovec *iovec,
703 size_t iovlen, loff_t pos)
705 struct llu_inode_info *lli = llu_i2info(inode);
706 struct ll_file_data *fd = lli->lli_file_data;
707 struct lov_stripe_md *lsm = lli->lli_smd;
708 struct lustre_handle lockh = { 0 };
709 struct ldlm_extent extent;
710 struct llu_sysio_callback_args *lsca;
711 struct llu_sysio_cookie *cookie;
717 OBD_ALLOC(lsca, sizeof(*lsca));
719 RETURN(ERR_PTR(-ENOMEM));
721 for (iovidx = 0; iovidx < iovlen; iovidx++) {
722 char *buf = iovec[iovidx].iov_base;
723 size_t count = iovec[iovidx].iov_len;
725 /* "If nbyte is 0, read() will return 0 and have no other results."
726 * -- Single Unix Spec */
731 extent.end = pos + count - 1;
/* read lock over exactly the byte range of this segment */
733 err = llu_extent_lock(fd, inode, lsm, LCK_PR, &extent, &lockh);
735 GOTO(err_out, err = -ENOLCK);
737 CDEBUG(D_INFO, "Reading inode %lu, "LPSZ" bytes, offset %Ld\n",
738 lli->lli_st_ino, count, pos);
740 cookie = llu_rw(OBD_BRW_READ, inode, buf, count, pos);
741 if (!IS_ERR(cookie)) {
743 lsca->cookies[lsca->ncookies++] = cookie;
/* llu_rw failed: drop the lock and unwind everything */
746 llu_extent_unlock(fd, inode, lsm, LCK_PR, &lockh);
747 GOTO(err_out, err = PTR_ERR(cookie));
751 err = llu_extent_unlock(fd, inode, lsm, LCK_PR, &lockh);
753 CERROR("extent_unlock fail: %d\n", err);
757 llu_update_atime(inode);
762 /* teardown all async stuff */
763 while (lsca->ncookies--) {
764 put_sysio_cookie(lsca->cookies[lsca->ncookies]);
766 OBD_FREE(lsca, sizeof(*lsca));
768 RETURN(ERR_PTR(err));
/*
 * libsysio iodone hook: wait for every cookie queued by
 * llu_file_read/write, accumulate the transferred byte counts into
 * ioctx_cc, and free all per-I/O state.
 *
 * NOTE(review): lossy extraction -- the early-return for a NULL lsca
 * ("write/read of 0 bytes"), error handling of osic_wait and the final
 * return appear to be on missing lines / past the end of this chunk.
 */
771 int llu_iop_iodone(struct ioctx *ioctxp)
773 struct llu_sysio_callback_args *lsca = ioctxp->ioctx_private;
774 struct llu_sysio_cookie *cookie;
775 int i, err = 0, rc = 0;
778 /* write/read(fd, buf, 0) */
782 LASSERT(!IS_ERR(lsca));
784 for (i = 0; i < lsca->ncookies; i++) {
785 cookie = lsca->cookies[i];
787 err = osic_wait(cookie->lsc_osic);
791 ioctxp->ioctx_cc += cookie->lsc_rwcount;
792 put_sysio_cookie(cookie);
/* on error, report the (negative) rc instead of a byte count */
797 ioctxp->ioctx_cc = rc;
799 OBD_FREE(lsca, sizeof(*lsca));
800 ioctxp->ioctx_private = NULL;