/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2013, Intel Corporation.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_io.c
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSD

#include <lustre_ver.h>
#include <libcfs/libcfs.h>
#include <lustre_fsfilt.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
static char *osd_zerocopy_tag = "zerocopy";
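
/*
 * Note (added commentary, not in the original file): the DMU identifies
 * buffer holds by an opaque tag pointer.  Every hold taken with
 * osd_zerocopy_tag in dmu_buf_hold_array_by_bonus() below must be dropped
 * with the same tag via dmu_buf_rele()/dmu_buf_rele_array(), which is
 * exactly what osd_bufs_put() does.
 */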
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
                        struct lu_buf *buf, loff_t *pos,
                        struct lustre_capa *capa)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        uint64_t           old_size;
        int                size = buf->lb_len;
        int                rc;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        read_lock(&obj->oo_attr_lock);
        old_size = obj->oo_attr.la_size;
        read_unlock(&obj->oo_attr_lock);

        if (*pos + size > old_size) {
                if (old_size < *pos)
                        return 0;
                size = old_size - *pos;
        }

        rc = -dmu_read(osd->od_objset.os, obj->oo_db->db_object, *pos, size,
                       buf->lb_buf, DMU_READ_PREFETCH);
        if (rc == 0) {
                rc = size;
                *pos += size;
                /* XXX: workaround for bug in HEAD: fsfilt_ldiskfs_read()
                 * returns the requested number of bytes, not the number
                 * actually read, so mimic that for symlinks */
                if (S_ISLNK(obj->oo_dt.do_lu.lo_header->loh_attr))
                        rc = buf->lb_len;
        }
        return rc;
}
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
                                 const loff_t size, loff_t pos,
                                 struct thandle *th)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t            oid;
        ENTRY;

        oh = container_of0(th, struct osd_thandle, ot_super);

        /* in some cases declare can race with creation (e.g. llog)
         * and we need to wait till the object is initialized. notice
         * LOHA_EXISTS is supposed to be the last step in the
         * initialization */

        /* declare a possible size change. notice we can't check
         * the current size here as another thread can change it */

        if (dt_object_exists(dt)) {
                LASSERT(obj->oo_db);
                oid = obj->oo_db->db_object;

                dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
        } else {
                oid = DMU_NEW_OBJECT;
                dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
        }

        dmu_tx_hold_write(oh->ot_tx, oid, pos, size);

        /* dt_declare_write() is usually called for system objects, such
         * as llog or last_rcvd files. We needn't enforce quota on those
         * objects, so always set lqi_space to 0. */
        RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                                 obj->oo_attr.la_gid, 0, oh, true, NULL,
                                 false));
}
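
/*
 * Illustrative caller-side sequence (added commentary; a sketch against the
 * generic dt API of this era, not code from this file, and exact wrapper
 * names may differ by Lustre version).  Every modification is declared on
 * the handle before the transaction starts, then executed once it runs:
 *
 *      th = dt_trans_create(env, dev);
 *      rc = dt_declare_record_write(env, dt, buf->lb_len, pos, th);
 *      rc = dt_trans_start(env, dev, th);
 *      rc = dt_record_write(env, dt, buf, &pos, th);
 *      dt_trans_stop(env, dev, th);
 *
 * The declare step lets this OSD reserve DMU transaction credits with
 * dmu_tx_hold_*() before dmu_tx_assign() runs at transaction start.
 */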
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
                         const struct lu_buf *buf, loff_t *pos,
                         struct thandle *th, struct lustre_capa *capa,
                         int ignore_quota)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        udmu_objset_t      *uos = &osd->od_objset;
        struct osd_thandle *oh;
        uint64_t            offset = *pos;
        ssize_t             rc;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);
        LASSERT(th != NULL);

        oh = container_of0(th, struct osd_thandle, ot_super);

        dmu_write(osd->od_objset.os, obj->oo_db->db_object, offset,
                  (uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
        write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < offset + buf->lb_len) {
                obj->oo_attr.la_size = offset + buf->lb_len;
                write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() copies directly from oo_attr
                 * into the dbuf; any update within a single txg copies
                 * the most recent value */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                          &obj->oo_attr.la_size, 8, oh);
                if (unlikely(rc))
                        RETURN(rc);
        } else {
                write_unlock(&obj->oo_attr_lock);
        }

        *pos += buf->lb_len;
        RETURN(buf->lb_len);
}
/*
 * XXX: for the moment we don't want to use lnb_flags for osd-internal
 * purposes as they are not yet well defined; instead the lowest bit of
 * the buffer address tells the buffer types apart:
 *      arc buffer:  .lnb_obj = abuf            (arc buffer loaned for write)
 *      dbuf buffer: .lnb_obj = dbuf | 1        (dbuf held for read)
 *      copy buffer: .lnb_page->mapping = obj   (page allocated for copy-write)
 */
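
/*
 * Illustrative sketch (added commentary, not code from this file): the trick
 * works because dbuf and arc buffer pointers are at least word-aligned, so
 * bit 0 is always free to act as a type flag:
 *
 *      lnb->dentry = (void *)((unsigned long)dbp | 1);   tag a held dbuf
 *      ptr = (unsigned long)lnb->dentry;
 *      if (ptr & 1)                              a dbuf: drop the DMU hold
 *              dmu_buf_rele((void *)(ptr & ~1UL), osd_zerocopy_tag);
 *      else if (lnb->dentry != NULL)             an arc buffer: return it
 *              dmu_return_arcbuf((void *)lnb->dentry);
 */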
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        unsigned long      ptr;
        int                i;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        for (i = 0; i < npages; i++) {
                if (lnb[i].page == NULL)
                        continue;
                if (lnb[i].page->mapping == (void *)obj) {
                        /* this is an anonymous page allocated for copy-write */
                        lnb[i].page->mapping = NULL;
                        __free_page(lnb[i].page);
                        cfs_atomic_dec(&osd->od_zerocopy_alloc);
                } else {
                        /* see comment in osd_bufs_get_read() */
                        ptr = (unsigned long)lnb[i].dentry;
                        if (ptr & 1UL) {
                                ptr &= ~1UL;
                                dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
                                cfs_atomic_dec(&osd->od_zerocopy_pin);
                        } else if (lnb[i].dentry != NULL) {
                                dmu_return_arcbuf((void *)lnb[i].dentry);
                                cfs_atomic_dec(&osd->od_zerocopy_loan);
                        }
                }
                lnb[i].page = NULL;
                lnb[i].dentry = NULL;
        }

        return 0;
}
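
/*
 * Added commentary: buffers loaned by the ARC may live in vmalloc()ed
 * memory, while dbuf data may be kmalloc()ed, so the right page-lookup
 * primitive below depends on the address.
 */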
static struct page *kmem_to_page(void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        return virt_to_page(addr);
}
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
                             loff_t off, ssize_t len, struct niobuf_local *lnb)
{
        struct osd_device *osd = osd_obj2dev(obj);
        dmu_buf_t        **dbp;
        int                rc, i, numbufs, npages = 0;
        ENTRY;

        /* grab buffers for read:
         * the OSD API lets us grab buffers first and then initiate the IOs
         * so that all required IOs run in parallel, but at the moment the
         * DMU doesn't provide a method to grab buffers.  If we discover
         * this is vital for good performance, we can write our own
         * replacement for dmu_buf_hold_array_by_bonus(). */
        while (len > 0) {
                rc = -dmu_buf_hold_array_by_bonus(obj->oo_db, off, len, TRUE,
                                                  osd_zerocopy_tag, &numbufs,
                                                  &dbp);
                if (unlikely(rc))
                        GOTO(err, rc);

                for (i = 0; i < numbufs; i++) {
                        int bufoff, tocpy, thispage;
                        void *dbf;

                        LASSERT(len > 0);
                        cfs_atomic_inc(&osd->od_zerocopy_pin);

                        bufoff = off - dbp[i]->db_offset;
                        tocpy = min_t(int, dbp[i]->db_size - bufoff, len);

                        /* kind of trick to differentiate dbuf vs. arcbuf */
                        LASSERT(((unsigned long)dbp[i] & 1) == 0);
                        dbf = (void *)((unsigned long)dbp[i] | 1);

                        while (tocpy > 0) {
                                thispage = CFS_PAGE_SIZE;
                                thispage -= bufoff & (CFS_PAGE_SIZE - 1);
                                thispage = min(tocpy, thispage);

                                lnb->rc = 0;
                                lnb->lnb_file_offset = off;
                                lnb->lnb_page_offset = bufoff & ~CFS_PAGE_MASK;
                                lnb->len = thispage;
                                lnb->page = kmem_to_page(dbp[i]->db_data +
                                                         bufoff);
                                /* mark just a single slot: we need this
                                 * reference to the dbuf to be released once */
                                lnb->dentry = dbf;
                                dbf = NULL;

                                tocpy -= thispage;
                                len -= thispage;
                                bufoff += thispage;
                                off += thispage;
                                npages++;
                                lnb++;
                        }

                        /* steal the dbuf so dmu_buf_rele_array() can't
                         * release it */
                        dbp[i] = NULL;
                }

                dmu_buf_rele_array(dbp, numbufs, osd_zerocopy_tag);
        }

        RETURN(npages);

err:
        LASSERT(rc < 0);
        osd_bufs_put(env, &obj->oo_dt, lnb - npages, npages);
        RETURN(rc);
}
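
/*
 * Worked example (added commentary): with a 128K dbuf held at db_offset 0
 * and a 4K CFS_PAGE_SIZE, a read of the whole block fills 32 niobufs.  All
 * of them point into dbp->db_data, but only the first carries the tagged
 * dbuf pointer in .dentry, so osd_bufs_put() drops the hold exactly once.
 */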
static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
                              loff_t off, ssize_t len, struct niobuf_local *lnb)
{
        struct osd_device *osd = osd_obj2dev(obj);
        int                plen, off_in_block, sz_in_block;
        int                rc, i = 0, npages = 0;
        arc_buf_t         *abuf;
        uint32_t           bs;
        u_longlong_t       dummy;
        ENTRY;

        dmu_object_size_from_db(obj->oo_db, &bs, &dummy);

        /*
         * currently only full blocks are subject to the zerocopy approach:
         * this way we're sure nobody is trying to update the same block
         */
        while (len > 0) {
                LASSERT(npages < PTLRPC_MAX_BRW_PAGES);

                off_in_block = off & (bs - 1);
                sz_in_block = min_t(int, bs - off_in_block, len);

                if (sz_in_block == bs) {
                        /* full block, try to use zerocopy */
                        abuf = dmu_request_arcbuf(obj->oo_db, bs);
                        if (unlikely(abuf == NULL))
                                GOTO(out_err, rc = -ENOMEM);

                        cfs_atomic_inc(&osd->od_zerocopy_loan);

                        /* go over the pages arcbuf contains, put them as
                         * local niobufs for ptlrpc's bulks */
                        while (sz_in_block > 0) {
                                plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);

                                lnb[i].lnb_file_offset = off;
                                lnb[i].lnb_page_offset = 0;
                                lnb[i].len = plen;
                                lnb[i].rc = 0;
                                if (sz_in_block == bs)
                                        lnb[i].dentry = (void *)abuf;
                                else
                                        lnb[i].dentry = NULL;

                                /* this one is not supposed to fail */
                                lnb[i].page = kmem_to_page(abuf->b_data +
                                                           off_in_block);
                                LASSERT(lnb[i].page);

                                lprocfs_counter_add(osd->od_stats,
                                                    LPROC_OSD_ZEROCOPY_IO, 1);

                                sz_in_block -= plen;
                                len -= plen;
                                off += plen;
                                off_in_block += plen;
                                i++;
                                npages++;
                        }
                } else {
                        if (off_in_block == 0 && len < bs &&
                            off + len >= obj->oo_attr.la_size)
                                lprocfs_counter_add(osd->od_stats,
                                                    LPROC_OSD_TAIL_IO, 1);

                        /* can't use zerocopy, allocate temp. buffers */
                        while (sz_in_block > 0) {
                                plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);

                                lnb[i].lnb_file_offset = off;
                                lnb[i].lnb_page_offset = 0;
                                lnb[i].len = plen;
                                lnb[i].rc = 0;
                                lnb[i].dentry = NULL;

                                lnb[i].page = alloc_page(OSD_GFP_IO);
                                if (unlikely(lnb[i].page == NULL))
                                        GOTO(out_err, rc = -ENOMEM);

                                LASSERT(lnb[i].page->mapping == NULL);
                                lnb[i].page->mapping = (void *)obj;

                                cfs_atomic_inc(&osd->od_zerocopy_alloc);
                                lprocfs_counter_add(osd->od_stats,
                                                    LPROC_OSD_COPY_IO, 1);

                                sz_in_block -= plen;
                                len -= plen;
                                off += plen;
                                i++;
                                npages++;
                        }
                }
        }

        RETURN(npages);

out_err:
        osd_bufs_put(env, &obj->oo_dt, lnb, npages);
        RETURN(rc);
}
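
/*
 * Example of the block-size decision above (added commentary): with a 128K
 * recordsize, a 1M write aligned at offset 0 loans eight arc buffers and
 * fills 256 niobufs through the zerocopy path, while a 4K write into the
 * middle of a block takes the copy path with a single allocated page.
 */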
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t offset, ssize_t len, struct niobuf_local *lnb,
                        int rw, struct lustre_capa *capa)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int rc;

        LASSERT(dt_object_exists(dt));
        if (rw == 0)
                rc = osd_bufs_get_read(env, obj, offset, len, lnb);
        else
                rc = osd_bufs_get_write(env, obj, offset, len, lnb);
        return rc;
}
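
/*
 * Added commentary on the buffer lifecycle: for bulk IO the caller first
 * holds buffers with osd_bufs_get() (rw = 0 for read, rw != 0 for write),
 * then runs osd_read_prep() for reads, or osd_write_prep(),
 * osd_declare_write_commit() and osd_write_commit() for writes, and
 * finally releases everything with osd_bufs_put().
 */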
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        /* nothing to do here: the data is already in the buffers */
        RETURN(0);
}
/* Return the number of bytes in the [start, start + size) region that
 * aren't mapped to allocated blocks */
static int osd_count_not_mapped(struct osd_object *obj, uint64_t start,
                                uint32_t size)
{
        dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)obj->oo_db;
        dmu_buf_impl_t *db;
        dnode_t        *dn;
        uint32_t        blkshift;
        uint64_t        end, blkid;
        int             rc;
        ENTRY;

        DB_DNODE_ENTER(dbi);
        dn = DB_DNODE(dbi);

        if (dn->dn_maxblkid == 0) {
                if (start + size <= dn->dn_datablksz)
                        GOTO(out, size = 0);
                if (start < dn->dn_datablksz)
                        start = dn->dn_datablksz;
                /* assume largest block size */
                blkshift = SPA_MAXBLOCKSHIFT;
        } else {
                /* blocksize can't change */
                blkshift = dn->dn_datablkshift;
        }

        /* compute address of last block */
        end = (start + size - 1) >> blkshift;
        /* align start on block boundaries */
        start >>= blkshift;

        /* size is null, can't be mapped */
        if (obj->oo_attr.la_size == 0 || dn->dn_maxblkid == 0)
                GOTO(out, size = (end - start + 1) << blkshift);
        /* beyond EOF, can't be mapped */
        if (start > dn->dn_maxblkid)
                GOTO(out, size = (end - start + 1) << blkshift);

        size = 0;
        for (blkid = start; blkid <= end; blkid++) {
                if (blkid == dn->dn_maxblkid)
                        /* this one is mapped for sure */
                        continue;
                if (blkid > dn->dn_maxblkid) {
                        size += (end - blkid + 1) << blkshift;
                        break;
                }

                rc = dbuf_hold_impl(dn, 0, blkid, TRUE, FTAG, &db);
                if (rc) {
                        /* for ENOENT (block not mapped) and any other errors,
                         * assume the block isn't mapped */
                        size += 1 << blkshift;
                        continue;
                }
                dbuf_rele(db, FTAG);
        }
out:
        DB_DNODE_EXIT(dbi);
        RETURN(size);
}
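
/*
 * Worked example (added commentary): take a 128K recordsize (blkshift 17)
 * file whose highest mapped block is dn_maxblkid = 1, and a write covering
 * blocks 0-3.  Block 0 is probed via dbuf_hold_impl(), block 1 equals
 * dn_maxblkid and is mapped for sure, and blocks 2-3 lie past dn_maxblkid,
 * so the function reports 2 << 17 = 256K as not mapped.
 */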
static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *th)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t            offset = 0;
        uint32_t            size = 0;
        long long           space = 0;
        int                 i, rc, flags = 0;
        bool                ignore_quota = false, synced = false;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);
        LASSERT(lnb);
        LASSERT(npages > 0);

        oh = container_of0(th, struct osd_thandle, ot_super);

        for (i = 0; i < npages; i++) {
                if (lnb[i].rc)
                        /* ENOSPC, network RPC error, etc.
                         * We don't want to book space for pages which will be
                         * skipped in osd_write_commit(). Hence we skip pages
                         * with lnb_rc != 0 here too */
                        continue;
                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the check
                 * for whether a page is from cache can be simplified to:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on a per-lnb basis as done by
                 * grant. */
                if ((lnb[i].flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        ignore_quota = true;
                if (size == 0) {
                        /* first valid lnb */
                        offset = lnb[i].lnb_file_offset;
                        size = lnb[i].len;
                        continue;
                }
                if (offset + size == lnb[i].lnb_file_offset) {
                        /* this lnb is contiguous to the previous one */
                        size += lnb[i].len;
                        continue;
                }

                dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
                                  offset, size);
                /* estimating space that will be consumed by a write is rather
                 * complicated with ZFS. As a consequence, we don't account for
                 * indirect blocks and quota overrun will be adjusted once the
                 * operation is committed, if required. */
                space += osd_count_not_mapped(obj, offset, size);

                offset = lnb[i].lnb_file_offset;
                size = lnb[i].len;
        }

        if (size) {
                dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
                                  offset, size);
                space += osd_count_not_mapped(obj, offset, size);
        }

        dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

        oh->ot_write_commit = 1; /* used in osd_trans_start() for fail_loc */

        /* the backend zfs filesystem might be configured to store multiple
         * data copies */
        space *= osd->od_objset.os->os_copies;
        space = toqb(space);
        CDEBUG(D_QUOTA, "writing %d pages, reserving "LPD64"K of quota "
               "space\n", npages, space);

retry:
        /* acquire quota space if needed */
        rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                               obj->oo_attr.la_gid, space, oh, true, &flags,
                               ignore_quota);

        if (!synced && rc == -EDQUOT && (flags & QUOTA_FL_SYNC) != 0) {
                dt_sync(env, th->th_dev);
                synced = true;
                CDEBUG(D_QUOTA, "retry after sync\n");
                flags = 0;
                goto retry;
        }

        /* we need only to store the overquota flags in the first lnb for
         * now; once we support multiple-object BRW, this code needs to be
         * revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].flags |= OBD_BRW_OVER_GRPQUOTA;

        RETURN(rc);
}
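
/*
 * Example of the coalescing above (added commentary): three contiguous 4K
 * niobufs at offsets 0, 4K and 8K produce a single
 * dmu_tx_hold_write(tx, oid, 0, 12K); a fourth niobuf at offset 1M starts
 * a new extent and a second hold.
 */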
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *th)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        udmu_objset_t      *uos = &osd->od_objset;
        struct osd_thandle *oh;
        uint64_t            new_size = 0;
        int                 i, rc = 0;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);
        LASSERT(th != NULL);

        oh = container_of0(th, struct osd_thandle, ot_super);

        for (i = 0; i < npages; i++) {
                CDEBUG(D_INODE, "write %u bytes at %u\n",
                       (unsigned) lnb[i].len,
                       (unsigned) lnb[i].lnb_file_offset);

                if (lnb[i].rc) {
                        /* ENOSPC, network RPC error, etc.
                         * Unlike ldiskfs, zfs allocates new blocks on rewrite,
                         * so we skip this page if lnb_rc is set to -ENOSPC */
                        CDEBUG(D_INODE, "obj "DFID": skipping lnb[%u]: rc=%d\n",
                               PFID(lu_object_fid(&dt->do_lu)), i,
                               lnb[i].rc);
                        continue;
                }

                if (lnb[i].page->mapping == (void *)obj) {
                        /* copy-write path: page was allocated locally */
                        dmu_write(osd->od_objset.os, obj->oo_db->db_object,
                                  lnb[i].lnb_file_offset, lnb[i].len,
                                  kmap(lnb[i].page), oh->ot_tx);
                        kunmap(lnb[i].page);
                } else if (lnb[i].dentry) {
                        LASSERT(((unsigned long)lnb[i].dentry & 1) == 0);
                        /* buffer loaned for zerocopy, try to use it.
                         * notice that dmu_assign_arcbuf() is smart
                         * enough to recognize a changed blocksize,
                         * in which case it falls back to dmu_write() */
                        dmu_assign_arcbuf(obj->oo_db, lnb[i].lnb_file_offset,
                                          (void *)lnb[i].dentry, oh->ot_tx);
                        /* drop the reference, otherwise osd_bufs_put()
                         * will be releasing it - bad! */
                        lnb[i].dentry = NULL;
                        cfs_atomic_dec(&osd->od_zerocopy_loan);
                }

                if (new_size < lnb[i].lnb_file_offset + lnb[i].len)
                        new_size = lnb[i].lnb_file_offset + lnb[i].len;
        }

        if (unlikely(new_size == 0)) {
                /* no pages to write, no transno is needed */
                th->th_local = 1;
                /* it is important to return 0 even when all lnb_rc == -ENOSPC
                 * since ofd_commitrw_write() retries several times on ENOSPC */
                RETURN(0);
        }

        write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < new_size) {
                obj->oo_attr.la_size = new_size;
                write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() copies directly from oo_attr
                 * into the dbuf; any update within a single txg copies
                 * the most recent value */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                          &obj->oo_attr.la_size, 8, oh);
        } else {
                write_unlock(&obj->oo_attr_lock);
        }

        RETURN(rc);
}
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                         struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct lu_buf      buf;
        loff_t             offset;
        int                i;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        for (i = 0; i < npages; i++) {
                buf.lb_buf = kmap(lnb[i].page);
                buf.lb_len = lnb[i].len;
                offset = lnb[i].lnb_file_offset;

                CDEBUG(D_OTHER, "read %u bytes at %u\n",
                       (unsigned) lnb[i].len,
                       (unsigned) lnb[i].lnb_file_offset);
                lnb[i].rc = osd_read(env, dt, &buf, &offset, NULL);
                kunmap(lnb[i].page);

                if (lnb[i].rc < buf.lb_len) {
                        /* all subsequent rc should be 0 */
                        while (++i < npages)
                                lnb[i].rc = 0;
                        break;
                }
        }

        RETURN(0);
}
/*
 * Punch/truncate an object
 *
 *      IN:     db  - dmu_buf of the object to free data in.
 *              off - start of section to free.
 *              len - length of section to free (DMU_OBJECT_END => to EOF).
 *
 *      RETURN: 0 on success
 *              error code on failure
 *
 * The transaction passed to this routine must have had
 * dmu_tx_hold_sa() and, if off < size, dmu_tx_hold_free()
 * called on it, and then been assigned to a transaction group.
 */
static int __osd_object_punch(objset_t *os, dmu_buf_t *db, dmu_tx_t *tx,
                              uint64_t size, uint64_t off, uint64_t len)
{
        int rc = 0;

        /* Assert that the transaction has been assigned to a
         * transaction group. */
        LASSERT(tx->tx_txg != 0);

        /* Nothing to do if the file is already at the desired length. */
        if (len == DMU_OBJECT_END && size == off)
                return 0;

        if (off < size)
                rc = -dmu_free_range(os, db->db_object, off, len, tx);

        return rc;
}
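
/*
 * Usage example (added commentary): truncating a file from 1M down to 64K
 * maps to __osd_object_punch(os, db, tx, 1M, 64K, DMU_OBJECT_END), while
 * punching a 128K hole at offset 256K passes off = 256K, len = 128K; only
 * the DMU_OBJECT_END case updates the size attribute in osd_punch() below.
 */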
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
                     __u64 start, __u64 end, struct thandle *th,
                     struct lustre_capa *capa)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        udmu_objset_t      *uos = &osd->od_objset;
        struct osd_thandle *oh;
        __u64               len;
        int                 rc;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));
        LASSERT(th != NULL);

        oh = container_of0(th, struct osd_thandle, ot_super);

        write_lock(&obj->oo_attr_lock);
        /* truncate */
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;
        write_unlock(&obj->oo_attr_lock);

        rc = __osd_object_punch(osd->od_objset.os, obj->oo_db, oh->ot_tx,
                                obj->oo_attr.la_size, start, len);

        /* set the new size */
        if (len == DMU_OBJECT_END) {
                write_lock(&obj->oo_attr_lock);
                obj->oo_attr.la_size = start;
                write_unlock(&obj->oo_attr_lock);
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                          &obj->oo_attr.la_size, 8, oh);
        }

        RETURN(rc);
}
static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
                             __u64 start, __u64 end, struct thandle *handle)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        __u64               len;
        ENTRY;

        oh = container_of0(handle, struct osd_thandle, ot_super);

        read_lock(&obj->oo_attr_lock);
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;

        /* declare we'll free some blocks ... */
        if (start < obj->oo_attr.la_size) {
                read_unlock(&obj->oo_attr_lock);
                dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object, start, len);
        } else {
                read_unlock(&obj->oo_attr_lock);
        }

        /* ... and we'll modify the size attribute */
        dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

        RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                                 obj->oo_attr.la_gid, 0, oh, true, NULL,
                                 false));
}
struct dt_body_operations osd_body_ops = {
        .dbo_read                 = osd_read,
        .dbo_declare_write        = osd_declare_write,
        .dbo_write                = osd_write,
        .dbo_bufs_get             = osd_bufs_get,
        .dbo_bufs_put             = osd_bufs_put,
        .dbo_write_prep           = osd_write_prep,
        .dbo_declare_write_commit = osd_declare_write_commit,
        .dbo_write_commit         = osd_write_commit,
        .dbo_read_prep            = osd_read_prep,
        .dbo_declare_punch        = osd_declare_punch,
        .dbo_punch                = osd_punch,
};