*
*/
-/* LUSTRE_VERSION_CODE */
-#include <lustre_ver.h>
/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
}
}
-#ifndef REQ_WRITE /* pre-2.6.35 */
-#define __REQ_WRITE BIO_RW
-#endif
-
#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
"(like SCSI errors, perhaps). Because bi_private is "
"NULL, I can't wake up the thread that initiated this "
"IO - you will probably have to reboot this node.\n");
- CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
- "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
- "bi_private: %p\n", bio->bi_next,
+ CERROR("bi_next: %p, bi_flags: %lx, "
+#ifdef HAVE_BI_RW
+ "bi_rw: %lu,"
+#else
+ "bi_opf: %u,"
+#endif
+ "bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p,"
+ "bi_cnt: %d, bi_private: %p\n", bio->bi_next,
(unsigned long)bio->bi_flags,
- bio->bi_rw, bio->bi_vcnt, bio_idx(bio),
+#ifdef HAVE_BI_RW
+ bio->bi_rw,
+#else
+ bio->bi_opf,
+#endif
+ bio->bi_vcnt, bio_idx(bio),
bio_sectors(bio) << 9, bio->bi_end_io,
#ifdef HAVE_BI_CNT
atomic_read(&bio->bi_cnt),
}
/* the check is outside of the cycle for performance reason -bzzz */
- if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
+ if (!bio_data_dir(bio)) {
bio_for_each_segment_all(bvl, bio, iter) {
if (likely(error == 0))
SetPageUptodate(bvl_to_page(bvl));
/* Submit @bio to the block layer; @rw selects direction (0 = read, 1 = write).
 *
 * NOTE(review): this span is a patch fragment — leading '+' characters are
 * diff add-markers, not C source; they are preserved verbatim below.
 * HAVE_SUBMIT_BIO_2ARGS presumably distinguishes the legacy two-argument
 * submit_bio(rw, bio) kernel API from the modern one-argument API where the
 * direction is carried in bio->bi_opf — TODO confirm against the autoconf
 * test that defines it. */
static void osd_submit_bio(int rw, struct bio *bio)
{
/* only plain READ (0) or WRITE (1) are valid here */
LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
+#ifdef HAVE_SUBMIT_BIO_2ARGS
if (rw == 0)
submit_bio(READ, bio);
else
submit_bio(WRITE, bio);
+#else
+ bio->bi_opf |= rw;
+submit_bio(bio);
+#endif
}
static int can_be_merged(struct bio *bio, sector_t sector)
int page_idx;
int i;
int rc = 0;
+ DECLARE_PLUG(plug);
ENTRY;
LASSERT(iobuf->dr_npages == npages);
osd_brw_stats_update(osd, iobuf);
iobuf->dr_start_time = cfs_time_current();
+ blk_start_plug(&plug);
for (page_idx = 0, block_idx = 0;
page_idx < npages;
page_idx++, block_idx += blocks_per_page) {
bio->bi_bdev = inode->i_sb->s_bdev;
bio_set_sector(bio, sector);
+#ifdef HAVE_BI_RW
bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
+#else
+ bio->bi_opf = (iobuf->dr_rw == 0) ? READ : WRITE;
+#endif
bio->bi_end_io = dio_complete_routine;
bio->bi_private = iobuf;
}
out:
+ blk_finish_plug(&plug);
+
/* in order to achieve better IO throughput, we don't wait for writes
* completion here. instead we proceed with transaction commit in
* parallel and wait for IO completion once transaction is stopped
RETURN(0);
}
-static struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw)
+static struct page *osd_get_page(struct dt_object *dt, loff_t offset,
+ gfp_t gfp_mask)
{
- struct inode *inode = osd_dt_obj(dt)->oo_inode;
- struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
- struct page *page;
+ struct inode *inode = osd_dt_obj(dt)->oo_inode;
+ struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
+ struct page *page;
LASSERT(inode);
page = find_or_create_page(inode->i_mapping, offset >> PAGE_SHIFT,
- GFP_NOFS | __GFP_HIGHMEM);
+ gfp_mask);
+
if (unlikely(page == NULL))
lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
LASSERT(PageLocked(lnb[i].lnb_page));
unlock_page(lnb[i].lnb_page);
put_page(lnb[i].lnb_page);
- lu_object_put(env, &dt->do_lu);
+ dt_object_put(env, dt);
lnb[i].lnb_page = NULL;
}
* \param pos byte offset of IO start
* \param len number of bytes of IO
* \param lnb array of extents undergoing IO
- * \param rw read or write operation?
+ * \param rw read or write operation, and other flags
* \param capa capabilities
*
* \retval pages (zero or more) loaded successfully
*/
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
loff_t pos, ssize_t len, struct niobuf_local *lnb,
- int rw)
+ enum dt_bufs_type rw)
{
- struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_object *obj = osd_dt_obj(dt);
int npages, i, rc = 0;
+ gfp_t gfp_mask;
LASSERT(obj->oo_inode);
osd_map_remote_to_local(pos, len, &npages, lnb);
+ /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
+ gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
+ GFP_HIGHUSER;
for (i = 0; i < npages; i++, lnb++) {
- lnb->lnb_page = osd_get_page(dt, lnb->lnb_file_offset, rw);
+ lnb->lnb_page = osd_get_page(dt, lnb->lnb_file_offset,
+ gfp_mask);
if (lnb->lnb_page == NULL)
GOTO(cleanup, rc = -ENOMEM);
if (pblock != 0) {
/* unmap any possible underlying metadata from
* the block device mapping. bug 6998. */
+#ifndef HAVE_CLEAN_BDEV_ALIASES
unmap_underlying_metadata(inode->i_sb->s_bdev,
*(bp->blocks));
+#else
+ clean_bdev_aliases(inode->i_sb->s_bdev,
+ *(bp->blocks), 1);
+#endif
}
bp->blocks++;
bp->num--;
struct page *fp = NULL;
int clen = 0;
pgoff_t max_page_index;
+ handle_t *handle = NULL;
max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
inode->i_ino, pages, (*page)->index);
+ if (create) {
+ create = LDISKFS_GET_BLOCKS_CREATE;
+ handle = ldiskfs_journal_current_handle();
+ LASSERT(handle != NULL);
+ rc = osd_attach_jinode(inode);
+ if (rc)
+ return rc;
+ }
/* pages are sorted already. so, we just have to find
* contig. space and process them properly */
while (i < pages) {
long blen, total = 0;
- handle_t *handle = NULL;
struct ldiskfs_map_blocks map = { 0 };
if (fp == NULL) { /* start new extent */
/* process found extent */
map.m_lblk = fp->index * blocks_per_page;
map.m_len = blen = clen * blocks_per_page;
- if (create) {
- create = LDISKFS_GET_BLOCKS_CREATE;
- handle = ldiskfs_journal_current_handle();
- LASSERT(handle != NULL);
- }
cont_map:
rc = ldiskfs_map_blocks(handle, inode, &map, create);
if (rc >= 0) {
* mapping. bug 6998. */
if ((map.m_flags & LDISKFS_MAP_NEW) &&
create)
+#ifndef HAVE_CLEAN_BDEV_ALIASES
unmap_underlying_metadata(
inode->i_sb->s_bdev,
map.m_pblk + c);
+#else
+ clean_bdev_aliases(
+ inode->i_sb->s_bdev,
+ map.m_pblk + c, 1);
+#endif
}
}
rc = 0;
struct niobuf_local *lnb, int npages,
struct thandle *handle)
{
- const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
- struct inode *inode = osd_dt_obj(dt)->oo_inode;
- struct osd_thandle *oh;
- int extents = 1;
- int depth;
- int i;
- int newblocks;
- int rc = 0;
- int flags = 0;
- int credits = 0;
- bool ignore_quota = false;
- long long quota_space = 0;
- struct osd_fextent extent = { 0 };
+ const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
+ struct inode *inode = osd_dt_obj(dt)->oo_inode;
+ struct osd_thandle *oh;
+ int extents = 1;
+ int depth;
+ int i;
+ int newblocks;
+ int rc = 0;
+ int flags = 0;
+ int credits = 0;
+ long long quota_space = 0;
+ struct osd_fextent extent = { 0 };
+ enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
ENTRY;
LASSERT(handle != NULL);
if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
(lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
OBD_BRW_FROM_GRANT)
- ignore_quota = true;
+ declare_flags |= OSD_QID_FORCE;
}
/*
osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
/* make sure the over quota flags were not set */
- lnb[0].lnb_flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA);
+ lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- quota_space, oh, osd_dt_obj(dt), true,
- &flags, ignore_quota);
+ i_projid_read(inode), quota_space, oh,
+ osd_dt_obj(dt), &flags, declare_flags);
/* we need only to store the overquota flags in the first lnb for
* now, once we support multiple objects BRW, this code needs be
lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
if (flags & QUOTA_FL_OVER_GRPQUOTA)
lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
+ if (flags & QUOTA_FL_OVER_PRJQUOTA)
+ lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;
RETURN(rc);
}
else
lnb[i].lnb_rc = lnb[i].lnb_len;
+ /* Bypass disk read if fail_loc is set properly */
+ if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
+ SetPageUptodate(lnb[i].lnb_page);
+
if (PageUptodate(lnb[i].lnb_page)) {
cache_hits++;
} else {
csize = min(blocksize - boffs, size);
bh = __ldiskfs_bread(NULL, inode, block, 0);
if (IS_ERR(bh)) {
- CERROR("%s: can't read %u@%llu on ino %lu: rc = %ld\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ CERROR("%s: can't read %u@%llu on ino %lu: "
+ "rc = %ld\n", osd_ino2name(inode),
csize, *offs, inode->i_ino,
PTR_ERR(bh));
return PTR_ERR(bh);
credits = depth;
/* if not append, then split may need to modify
* existing blocks moving entries into the new ones */
- if (_pos == -1)
+ if (_pos != -1)
credits += depth;
/* blocks to store data: bitmap,gd,itself */
credits += blocks * 3;
* objects, so always set the lqi_space as 0. */
if (inode != NULL)
rc = osd_declare_inode_qid(env, i_uid_read(inode),
- i_gid_read(inode), 0, oh, obj, true,
- NULL, false);
+ i_gid_read(inode),
+ i_projid_read(inode), 0,
+ oh, obj, NULL, OSD_QID_BLK);
RETURN(rc);
}
((char *)buf)[bufsize] = '\0';
++bufsize;
}
- while (bufsize > 0) {
- if (bh != NULL)
- brelse(bh);
+
+ while (bufsize > 0) {
+ int credits = handle->h_buffer_credits;
+
+ if (bh)
+ brelse(bh);
block = offset >> inode->i_blkbits;
boffs = offset & (blocksize - 1);
err = PTR_ERR(bh);
bh = NULL;
}
- CERROR("%s: error reading offset %llu (block %lu): "
- "rc = %d\n",
- inode->i_sb->s_id, offset, block, err);
+
+ CERROR("%s: error reading offset %llu (block %lu, "
+ "size %d, offs %llu), credits %d/%d: rc = %d\n",
+ inode->i_sb->s_id, offset, block, bufsize, *offs,
+ credits, handle->h_buffer_credits, err);
break;
}
LASSERT(inode);
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- 0, oh, osd_dt_obj(dt), true, NULL, false);
+ i_projid_read(inode), 0, oh, osd_dt_obj(dt),
+ NULL, OSD_QID_BLK);
RETURN(rc);
}