* Use is subject to license terms.
*/
/*
- * Copyright (c) 2011, 2012 Whamcloud, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
* Use is subject to license terms.
*/
/*
* Author: Mike Pershin <tappro@whamcloud.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_OSD
#include <lustre_ver.h>
#include <libcfs/libcfs.h>
-#include <lustre_fsfilt.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
}
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
- const loff_t size, loff_t pos,
+ const struct lu_buf *buf, loff_t pos,
struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
}
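+ /* reserve enough transaction space to cover the whole
+ * buffer being written at @pos */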
- dmu_tx_hold_write(oh->ot_tx, oid, pos, size);
+ dmu_tx_hold_write(oh->ot_tx, oid, pos, buf->lb_len);
/* dt_declare_write() is usually called for system objects, such
* as llog or last_rcvd files. We needn't enforce quota on those
/* this is an anonymous page allocated for copy-write */
lnb[i].page->mapping = NULL;
__free_page(lnb[i].page);
- cfs_atomic_dec(&osd->od_zerocopy_alloc);
+ atomic_dec(&osd->od_zerocopy_alloc);
} else {
/* see comment in osd_bufs_get_read() */
ptr = (unsigned long)lnb[i].dentry;
if (ptr & 1UL) {
ptr &= ~1UL;
dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
- cfs_atomic_dec(&osd->od_zerocopy_pin);
+ atomic_dec(&osd->od_zerocopy_pin);
} else if (lnb[i].dentry != NULL) {
dmu_return_arcbuf((void *)lnb[i].dentry);
- cfs_atomic_dec(&osd->od_zerocopy_loan);
+ atomic_dec(&osd->od_zerocopy_loan);
}
}
lnb[i].page = NULL;
return 0;
}
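+ /* resolve a kernel virtual address, whether kmalloc'ed or
+ * vmalloc'ed, to the struct page backing it */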
-static struct page *kmem_to_page(void *addr)
+static inline struct page *kmem_to_page(void *addr)
{
- struct page *page;
-
- if (kmem_virt(addr))
- page = vmalloc_to_page(addr);
+ if (is_vmalloc_addr(addr))
+ return vmalloc_to_page(addr);
else
- page = virt_to_page(addr);
-
- return page;
+ return virt_to_page(addr);
}
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
*/
while (len > 0) {
rc = -dmu_buf_hold_array_by_bonus(obj->oo_db, off, len, TRUE,
- osd_zerocopy_tag, &numbufs,
- &dbp);
- LASSERT(rc == 0);
+ osd_zerocopy_tag, &numbufs,
+ &dbp);
+ if (unlikely(rc))
+ GOTO(err, rc);
for (i = 0; i < numbufs; i++) {
int bufoff, tocpy, thispage;
LASSERT(len > 0);
- cfs_atomic_inc(&osd->od_zerocopy_pin);
+ atomic_inc(&osd->od_zerocopy_pin);
bufoff = off - dbp[i]->db_offset;
tocpy = min_t(int, dbp[i]->db_size - bufoff, len);
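+ /* tag the lowest bit of the dbuf pointer so osd_bufs_put()
+ * can tell a held dbuf apart from a loaned arcbuf */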
dbf = (void *) ((unsigned long)dbp[i] | 1);
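+ /* chop the dbuf into page-sized niobufs, respecting the
+ * page alignment of bufoff */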
while (tocpy > 0) {
- thispage = CFS_PAGE_SIZE;
- thispage -= bufoff & (CFS_PAGE_SIZE - 1);
+ thispage = PAGE_CACHE_SIZE;
+ thispage -= bufoff & (PAGE_CACHE_SIZE - 1);
thispage = min(tocpy, thispage);
lnb->rc = 0;
}
RETURN(npages);
+
+err:
+ LASSERT(rc < 0);
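+ /* lnb was advanced as pages were filled, so step back to
+ * the first niobuf to release everything acquired so far */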
+ osd_bufs_put(env, &obj->oo_dt, lnb - npages, npages);
+ RETURN(rc);
}
static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
{
struct osd_device *osd = osd_obj2dev(obj);
int plen, off_in_block, sz_in_block;
- int i = 0, npages = 0;
+ int rc, i = 0, npages = 0;
arc_buf_t *abuf;
uint32_t bs;
uint64_t dummy;
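+ /* request a loaned arcbuf for zerocopy; on failure record
+ * the error in rc, which out_err returns */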
abuf = dmu_request_arcbuf(obj->oo_db, bs);
if (unlikely(abuf == NULL))
- GOTO(out_err, -ENOMEM);
+ GOTO(out_err, rc = -ENOMEM);
- cfs_atomic_inc(&osd->od_zerocopy_loan);
+ atomic_inc(&osd->od_zerocopy_loan);
/* go over the pages the arcbuf contains and expose them
* as local niobufs for ptlrpc's bulk transfers */
while (sz_in_block > 0) {
- plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);
+ plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);
lnb[i].lnb_file_offset = off;
lnb[i].lnb_page_offset = 0;
/* can't use zerocopy, allocate temp. buffers */
while (sz_in_block > 0) {
- plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);
+ plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);
lnb[i].lnb_file_offset = off;
lnb[i].lnb_page_offset = 0;
lnb[i].page = alloc_page(OSD_GFP_IO);
if (unlikely(lnb[i].page == NULL))
- GOTO(out_err, -ENOMEM);
+ GOTO(out_err, rc = -ENOMEM);
LASSERT(lnb[i].page->mapping == NULL);
lnb[i].page->mapping = (void *)obj;
- cfs_atomic_inc(&osd->od_zerocopy_alloc);
+ atomic_inc(&osd->od_zerocopy_alloc);
lprocfs_counter_add(osd->od_stats,
LPROC_OSD_COPY_IO, 1);
out_err:
osd_bufs_put(env, &obj->oo_dt, lnb, npages);
- RETURN(-ENOMEM);
+ RETURN(rc);
}
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
/* ignore quota for the whole request if any page is from
* the client cache or was written by root.
*
+ * XXX once we drop 1.8 client support, the check for
+ * whether a page is from cache can be simplified to:
+ * !(lnb[i].flags & OBD_BRW_SYNC)
+ *
* XXX we could handle this on a per-lnb basis as done
* by grant. */
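+ /* currently a page counts as cached when it consumed grant
+ * (OBD_BRW_FROM_GRANT) but is not a synchronous write */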
if ((lnb[i].flags & OBD_BRW_NOQUOTA) ||
- !(lnb[i].flags & OBD_BRW_SYNC))
+ (lnb[i].flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
+ OBD_BRW_FROM_GRANT)
ignore_quota = true;
if (size == 0) {
/* first valid lnb */
/* drop the reference, otherwise osd_bufs_put()
* will release it - bad! */
lnb[i].dentry = NULL;
- cfs_atomic_dec(&osd->od_zerocopy_loan);
+ atomic_dec(&osd->od_zerocopy_loan);
}
if (new_size < lnb[i].lnb_file_offset + lnb[i].len)
.dbo_declare_write_commit = osd_declare_write_commit,
.dbo_write_commit = osd_write_commit,
.dbo_read_prep = osd_read_prep,
- .do_declare_punch = osd_declare_punch,
- .do_punch = osd_punch,
+ .dbo_declare_punch = osd_declare_punch,
+ .dbo_punch = osd_punch,
};