/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_io.c
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSD

#include <lustre_ver.h>
#include <libcfs/libcfs.h>
#include <lustre_fsfilt.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/dmu.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
#include <sys/txg.h>

static char *osd_zerocopy_tag = "zerocopy";

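/*
 * Read data from an existing object.
 *
 * The request is clipped against the cached size (oo_attr.la_size), so a
 * read at or past EOF returns 0; the data itself is copied out of the
 * DMU with prefetch enabled.
 */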
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
                        struct lu_buf *buf, loff_t *pos,
                        struct lustre_capa *capa)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        uint64_t           old_size;
        int                size = buf->lb_len;
        int                rc;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        read_lock(&obj->oo_attr_lock);
        old_size = obj->oo_attr.la_size;
        read_unlock(&obj->oo_attr_lock);

        if (*pos + size > old_size) {
                if (old_size < *pos)
                        return 0;
                else
                        size = old_size - *pos;
        }

        rc = -dmu_read(osd->od_objset.os, obj->oo_db->db_object, *pos, size,
                       buf->lb_buf, DMU_READ_PREFETCH);
        if (rc == 0) {
                rc = size;
                *pos += size;

                /* XXX: workaround for bug in HEAD: fsfilt_ldiskfs_read()
                 * returns the requested number of bytes, not the number
                 * actually read */
                if (S_ISLNK(obj->oo_dt.do_lu.lo_header->loh_attr))
                        rc = buf->lb_len;
        }

        return rc;
}

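/*
 * Declare a buffered write: reserve transaction credits for the data
 * write itself and for the SA (size attribute) update.  For objects
 * that don't exist yet (declare can race with creation) DMU_NEW_OBJECT
 * is used so the DMU books worst-case credits.
 */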
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
                                 const loff_t size, loff_t pos,
                                 struct thandle *th)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t            oid;
        ENTRY;

        oh = container_of0(th, struct osd_thandle, ot_super);

        /* in some cases declare can race with creation (e.g. llog)
         * and we need to wait till the object is initialized. notice
         * LOHA_EXISTs is supposed to be the last step in the
         * initialization */

        /* declare possible size change. notice we can't check
         * current size here as another thread can change it */

        if (dt_object_exists(dt)) {
                LASSERT(obj->oo_db);
                oid = obj->oo_db->db_object;

                dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
        } else {
                oid = DMU_NEW_OBJECT;
                dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
        }

        dmu_tx_hold_write(oh->ot_tx, oid, pos, size);

        /* dt_declare_write() is usually called for system objects, such
         * as llog or last_rcvd files. We needn't enforce quota on those
         * objects, so always set the lqi_space as 0. */
        RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                                 obj->oo_attr.la_gid, 0, oh, true, NULL,
                                 false));
}

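/*
 * Synchronous buffered write: copy the data through dmu_write() under
 * the caller's transaction and, if the file grew, push the new size
 * from oo_attr into the SA within the same txg.
 */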
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
                         const struct lu_buf *buf, loff_t *pos,
                         struct thandle *th, struct lustre_capa *capa,
                         int ignore_quota)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        udmu_objset_t      *uos = &osd->od_objset;
        struct osd_thandle *oh;
        uint64_t            offset = *pos;
        int                 rc;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);

        dmu_write(osd->od_objset.os, obj->oo_db->db_object, offset,
                  (uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
        write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < offset + buf->lb_len) {
                obj->oo_attr.la_size = offset + buf->lb_len;
                write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() will be copying directly from oo_attr
                 * into dbuf. any update within a single txg will copy the
                 * most recent value */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                          &obj->oo_attr.la_size, 8, oh);
                if (unlikely(rc))
                        GOTO(out, rc);
        } else {
                write_unlock(&obj->oo_attr_lock);
        }

        *pos += buf->lb_len;
        rc = buf->lb_len;

out:
        RETURN(rc);
}

/*
 * XXX: for the moment I don't want to use lnb_flags for osd-internal
 *      purposes as it's not very well defined ...
 *      instead I use the lowest bit of the address so that:
 *        arc buffer:  .lnb_obj = abuf          (arc we loan for write)
 *        dbuf buffer: .lnb_obj = dbuf | 1      (dbuf we get for read)
 *        copy buffer: .lnb_page->mapping = obj (page we allocate for write)
 */

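/*
 * Release the buffers grabbed by osd_bufs_get(): anonymous copy pages
 * are freed, pinned dbufs are released, and loaned arc buffers that were
 * never consumed by dmu_assign_arcbuf() are returned.
 */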
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        unsigned long      ptr;
        int                i;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        for (i = 0; i < npages; i++) {
                if (lnb[i].page == NULL)
                        continue;
                if (lnb[i].page->mapping == (void *)obj) {
                        /* this is an anonymous page allocated for copy-write */
                        lnb[i].page->mapping = NULL;
                        __free_page(lnb[i].page);
                        cfs_atomic_dec(&osd->od_zerocopy_alloc);
                } else {
                        /* see comment in osd_bufs_get_read() */
                        ptr = (unsigned long)lnb[i].dentry;
                        if (ptr & 1UL) {
                                ptr &= ~1UL;
                                dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
                                cfs_atomic_dec(&osd->od_zerocopy_pin);
                        } else if (lnb[i].dentry != NULL) {
                                dmu_return_arcbuf((void *)lnb[i].dentry);
                                cfs_atomic_dec(&osd->od_zerocopy_loan);
                        }
                }
                lnb[i].page = NULL;
                lnb[i].dentry = NULL;
        }

        return 0;
}

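/* addresses here can come from vmalloc'ed arc buffers as well as from
 * kmalloc'ed dbuf data, so pick the proper address-to-page conversion */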
static struct page *kmem_to_page(void *addr)
{
        struct page *page;

        if (kmem_virt(addr))
                page = vmalloc_to_page(addr);
        else
                page = virt_to_page(addr);

        return page;
}

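/*
 * Zerocopy read: hold the dbufs covering [off, off + len) and expose the
 * pages backing their data to ptlrpc via lnb.  The dbuf reference is
 * stashed (tagged with the lowest address bit) in a single lnb slot so
 * that osd_bufs_put() releases it exactly once.
 */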
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
                             loff_t off, ssize_t len, struct niobuf_local *lnb)
{
        struct osd_device *osd = osd_obj2dev(obj);
        dmu_buf_t        **dbp;
        int                rc, i, numbufs, npages = 0;
        ENTRY;

        /* grab buffers for read:
         * the OSD API lets us grab the buffers first, then initiate the IO(s)
         * so that all required IOs will be done in parallel, but at the
         * moment the DMU doesn't provide us with a method to grab buffers.
         * If we discover this is vital for good performance we
         * can get our own replacement for dmu_buf_hold_array_by_bonus().
         */
        rc = -dmu_buf_hold_array_by_bonus(obj->oo_db, off, len, TRUE,
                                          osd_zerocopy_tag, &numbufs,
                                          &dbp);
        if (unlikely(rc))
                RETURN(rc);

        for (i = 0; i < numbufs; i++) {
                int bufoff, tocpy, thispage;
                void *dbf;

                LASSERT(len > 0);

                cfs_atomic_inc(&osd->od_zerocopy_pin);

                bufoff = off - dbp[i]->db_offset;
                tocpy = min_t(int, dbp[i]->db_size - bufoff, len);

                /* kind of trick to differentiate dbuf vs. arcbuf */
                LASSERT(((unsigned long)dbp[i] & 1) == 0);
                dbf = (void *) ((unsigned long)dbp[i] | 1);

                while (tocpy > 0) {
                        thispage = CFS_PAGE_SIZE;
                        thispage -= bufoff & (CFS_PAGE_SIZE - 1);
                        thispage = min(tocpy, thispage);

                        lnb->rc = 0;
                        lnb->lnb_file_offset = off;
                        lnb->lnb_page_offset = bufoff & ~CFS_PAGE_MASK;
                        lnb->len = thispage;
                        lnb->page = kmem_to_page(dbp[i]->db_data +
                                                 bufoff);
                        /* mark just a single slot: we need this
                         * reference to the dbuf to be released once */
                        lnb->dentry = dbf;
                        dbf = NULL;

                        tocpy -= thispage;
                        len -= thispage;
                        bufoff += thispage;
                        off += thispage;

                        npages++;
                        lnb++;
                }

                /* steal the dbuf so dmu_buf_rele_array() can't release it */
                dbp[i] = NULL;
        }

        dmu_buf_rele_array(dbp, numbufs, osd_zerocopy_tag);

        RETURN(npages);
}

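/*
 * Prepare buffers for a write: full blocks are served with loaned arc
 * buffers (zerocopy), while partial blocks fall back to anonymous pages
 * that will be copied into the DMU in osd_write_commit().
 */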
static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
                              loff_t off, ssize_t len, struct niobuf_local *lnb)
{
        struct osd_device *osd = osd_obj2dev(obj);
        int                plen, off_in_block, sz_in_block;
        int                i = 0, npages = 0;
        int                rc;
        arc_buf_t         *abuf;
        uint32_t           bs;
        uint64_t           dummy;
        ENTRY;

        dmu_object_size_from_db(obj->oo_db, &bs, &dummy);

        /*
         * currently only full blocks are subject to the zerocopy approach:
         * so that we're sure nobody is trying to update the same block
         */
        while (len > 0) {
                LASSERT(npages < PTLRPC_MAX_BRW_PAGES);

                off_in_block = off & (bs - 1);
                sz_in_block = min_t(int, bs - off_in_block, len);

                if (sz_in_block == bs) {
                        /* full block, try to use zerocopy */

                        abuf = dmu_request_arcbuf(obj->oo_db, bs);
                        if (unlikely(abuf == NULL))
                                GOTO(out_err, rc = -ENOMEM);

                        cfs_atomic_inc(&osd->od_zerocopy_loan);

                        /* go over the pages the arcbuf contains, put them as
                         * local niobufs for ptlrpc's bulks */
                        while (sz_in_block > 0) {
                                plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);

                                lnb[i].lnb_file_offset = off;
                                lnb[i].lnb_page_offset = 0;
                                lnb[i].len = plen;
                                lnb[i].rc = 0;
                                if (sz_in_block == bs)
                                        lnb[i].dentry = (void *)abuf;
                                else
                                        lnb[i].dentry = NULL;

                                /* this one is not supposed to fail */
                                lnb[i].page = kmem_to_page(abuf->b_data +
                                                           off_in_block);
                                LASSERT(lnb[i].page);

                                lprocfs_counter_add(osd->od_stats,
                                                    LPROC_OSD_ZEROCOPY_IO, 1);

                                sz_in_block -= plen;
                                len -= plen;
                                off += plen;
                                off_in_block += plen;
                                i++;
                                npages++;
                        }
                } else {
                        if (off_in_block == 0 && len < bs &&
                            off + len >= obj->oo_attr.la_size)
                                lprocfs_counter_add(osd->od_stats,
                                                    LPROC_OSD_TAIL_IO, 1);

                        /* can't use zerocopy, allocate temp. buffers */
                        while (sz_in_block > 0) {
                                plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);

                                lnb[i].lnb_file_offset = off;
                                lnb[i].lnb_page_offset = 0;
                                lnb[i].len = plen;
                                lnb[i].rc = 0;
                                lnb[i].dentry = NULL;

                                lnb[i].page = alloc_page(OSD_GFP_IO);
                                if (unlikely(lnb[i].page == NULL))
                                        GOTO(out_err, rc = -ENOMEM);

                                LASSERT(lnb[i].page->mapping == NULL);
                                lnb[i].page->mapping = (void *)obj;

                                cfs_atomic_inc(&osd->od_zerocopy_alloc);
                                lprocfs_counter_add(osd->od_stats,
                                                    LPROC_OSD_COPY_IO, 1);

                                sz_in_block -= plen;
                                len -= plen;
                                off += plen;
                                i++;
                                npages++;
                        }
                }
        }

        RETURN(npages);

out_err:
        osd_bufs_put(env, &obj->oo_dt, lnb, npages);
        RETURN(rc);
}

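/* dispatch to the read or write implementation depending on the bulk
 * direction */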
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t offset, ssize_t len, struct niobuf_local *lnb,
                        int rw, struct lustre_capa *capa)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int                rc;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        if (rw == 0)
                rc = osd_bufs_get_read(env, obj, offset, len, lnb);
        else
                rc = osd_bufs_get_write(env, obj, offset, len, lnb);

        return rc;
}

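/* nothing to prepare for writes: the buffers are ready to use as
 * returned by osd_bufs_get_write() */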
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        return 0;
}

/* Return the number of bytes in the [start, start + size) region that
 * aren't mapped to allocated blocks yet */
static int osd_count_not_mapped(struct osd_object *obj, uint64_t start,
                                uint32_t size)
{
        dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)obj->oo_db;
        dmu_buf_impl_t *db;
        dnode_t        *dn;
        uint32_t        blkshift;
        uint64_t        end, blkid;
        int             rc;
        ENTRY;

        DB_DNODE_ENTER(dbi);
        dn = DB_DNODE(dbi);

        if (dn->dn_maxblkid == 0) {
                if (start + size <= dn->dn_datablksz)
                        GOTO(out, size = 0);
                if (start < dn->dn_datablksz)
                        start = dn->dn_datablksz;
                /* assume largest block size */
                blkshift = SPA_MAXBLOCKSHIFT;
        } else {
                /* blocksize can't change */
                blkshift = dn->dn_datablkshift;
        }

        /* compute address of last block */
        end = (start + size - 1) >> blkshift;
        /* align start on block boundaries */
        start >>= blkshift;

        /* object size is null, so nothing can be mapped yet */
        if (obj->oo_attr.la_size == 0 || dn->dn_maxblkid == 0)
                GOTO(out, size = (end - start + 1) << blkshift);

        /* beyond EOF, can't be mapped */
        if (start > dn->dn_maxblkid)
                GOTO(out, size = (end - start + 1) << blkshift);

        size = 0;
        for (blkid = start; blkid <= end; blkid++) {
                if (blkid == dn->dn_maxblkid)
                        /* this one is mapped for sure */
                        continue;
                if (blkid > dn->dn_maxblkid) {
                        size += (end - blkid + 1) << blkshift;
                        GOTO(out, size);
                }

                rc = dbuf_hold_impl(dn, 0, blkid, TRUE, FTAG, &db);
                if (rc) {
                        /* for ENOENT (block not mapped) and any other errors,
                         * assume the block isn't mapped */
                        size += 1 << blkshift;
                        continue;
                }
                dbuf_rele(db, FTAG);
        }

        GOTO(out, size);
out:
        DB_DNODE_EXIT(dbi);
        RETURN(size);
}

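/*
 * Declare the transaction and quota space needed for a bulk write.
 * Contiguous lnbs are merged into extents so that dmu_tx_hold_write()
 * and osd_count_not_mapped() are called once per extent rather than
 * once per page.
 */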
static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *th)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t            space = 0, offset = 0;
        uint32_t            size = 0;
        int                 i, rc, flags = 0;
        bool                ignore_quota = false, synced = false;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(lnb);
        LASSERT(npages > 0);

        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);

        for (i = 0; i < npages; i++) {
                if (lnb[i].rc)
                        /* ENOSPC, network RPC error, etc.
                         * We don't want to book space for pages which will be
                         * skipped in osd_write_commit(). Hence we skip pages
                         * with lnb_rc != 0 here too */
                        continue;
                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX we could handle this on per-lnb basis as done by
                 * grant. */
                if ((lnb[i].flags & OBD_BRW_NOQUOTA) ||
                    !(lnb[i].flags & OBD_BRW_SYNC))
                        ignore_quota = true;
                if (size == 0) {
                        /* first valid lnb */
                        offset = lnb[i].lnb_file_offset;
                        size = lnb[i].len;
                        continue;
                }
                if (offset + size == lnb[i].lnb_file_offset) {
                        /* this lnb is contiguous to the previous one */
                        size += lnb[i].len;
                        continue;
                }

                dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
                                  offset, size);
                /* estimating space that will be consumed by a write is rather
                 * complicated with ZFS. As a consequence, we don't account for
                 * indirect blocks and quota overrun will be adjusted once the
                 * operation is committed, if required. */
                space += osd_count_not_mapped(obj, offset, size);

                offset = lnb[i].lnb_file_offset;
                size = lnb[i].len;
        }

        if (size) {
                dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
                                  offset, size);
                space += osd_count_not_mapped(obj, offset, size);
        }

        dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

        oh->ot_write_commit = 1; /* used in osd_trans_start() for fail_loc */

        /* the backend zfs filesystem might be configured to store multiple
         * data copies */
        space *= osd->od_objset.os->os_copies;
        space  = toqb(space);
        CDEBUG(D_QUOTA, "writing %d pages, reserving "LPD64"K of quota "
               "space\n", npages, space);

retry:
        /* acquire quota space if needed */
        rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                               obj->oo_attr.la_gid, space, oh, true, &flags,
                               ignore_quota);

        if (!synced && rc == -EDQUOT && (flags & QUOTA_FL_SYNC) != 0) {
                dt_sync(env, th->th_dev);
                synced = true;
                CDEBUG(D_QUOTA, "retry after sync\n");
                flags = 0;
                goto retry;
        }

        /* we need only to store the overquota flags in the first lnb for
         * now, once we support multiple-object BRW, this code needs to be
         * revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].flags |= OBD_BRW_OVER_GRPQUOTA;

        RETURN(rc);
}

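/*
 * Apply a prepared bulk write: copy pages into the DMU, or hand loaned
 * arc buffers over with dmu_assign_arcbuf(), then update the size SA
 * if the object grew.
 */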
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *th)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        udmu_objset_t      *uos = &osd->od_objset;
        struct osd_thandle *oh;
        uint64_t            new_size = 0;
        int                 i, rc = 0;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);

        for (i = 0; i < npages; i++) {
                CDEBUG(D_INODE, "write %u bytes at %u\n",
                       (unsigned) lnb[i].len,
                       (unsigned) lnb[i].lnb_file_offset);

                if (lnb[i].rc) {
                        /* ENOSPC, network RPC error, etc.
                         * Unlike ldiskfs, zfs allocates new blocks on rewrite,
                         * so we skip this page if lnb_rc is set to -ENOSPC */
                        CDEBUG(D_INODE, "obj "DFID": skipping lnb[%u]: rc=%d\n",
                               PFID(lu_object_fid(&dt->do_lu)), i,
                               lnb[i].rc);
                        continue;
                }

                if (lnb[i].page->mapping == (void *)obj) {
                        dmu_write(osd->od_objset.os, obj->oo_db->db_object,
                                  lnb[i].lnb_file_offset, lnb[i].len,
                                  kmap(lnb[i].page), oh->ot_tx);
                        kunmap(lnb[i].page);
                } else if (lnb[i].dentry) {
                        LASSERT(((unsigned long)lnb[i].dentry & 1) == 0);
                        /* buffer loaned for zerocopy, try to use it.
                         * notice that dmu_assign_arcbuf() is smart
                         * enough to recognize a changed blocksize,
                         * in which case it falls back to dmu_write() */
                        dmu_assign_arcbuf(obj->oo_db, lnb[i].lnb_file_offset,
                                          (void *)lnb[i].dentry, oh->ot_tx);
                        /* drop the reference, otherwise osd_put_bufs()
                         * will be releasing it - bad! */
                        lnb[i].dentry = NULL;
                        cfs_atomic_dec(&osd->od_zerocopy_loan);
                }

                if (new_size < lnb[i].lnb_file_offset + lnb[i].len)
                        new_size = lnb[i].lnb_file_offset + lnb[i].len;
        }

        if (unlikely(new_size == 0)) {
                /* no pages to write, no transno is needed */
                th->th_local = 1;
                /* it is important to return 0 even when all lnb_rc == -ENOSPC
                 * since ofd_commitrw_write() retries several times on ENOSPC */
                RETURN(0);
        }

        write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < new_size) {
                obj->oo_attr.la_size = new_size;
                write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() will be copying directly from
                 * oo_attr into dbuf. any update within a single txg will copy
                 * the most recent value */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                          &obj->oo_attr.la_size, 8, oh);
        } else {
                write_unlock(&obj->oo_attr_lock);
        }

        RETURN(rc);
}

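/* fill the pages of a read bulk with data via osd_read(); a short read
 * zeroes the rc of all subsequent pages */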
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                         struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct lu_buf      buf;
        loff_t             offset;
        int                i;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        for (i = 0; i < npages; i++) {
                buf.lb_buf = kmap(lnb[i].page);
                buf.lb_len = lnb[i].len;
                offset = lnb[i].lnb_file_offset;

                CDEBUG(D_OTHER, "read %u bytes at %u\n",
                       (unsigned) lnb[i].len,
                       (unsigned) lnb[i].lnb_file_offset);
                lnb[i].rc = osd_read(env, dt, &buf, &offset, NULL);
                kunmap(lnb[i].page);

                if (lnb[i].rc < buf.lb_len) {
                        /* all subsequent rc should be 0 */
                        while (++i < npages)
                                lnb[i].rc = 0;
                        break;
                }
        }

        return 0;
}

/*
 * Punch/truncate an object
 *
 *      IN:     db  - dmu_buf of the object to free data in.
 *              off - start of section to free.
 *              len - length of section to free (DMU_OBJECT_END => to EOF).
 *
 *      RETURN: 0 on success
 *              error code on failure
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_sa() and if off < size, dmu_tx_hold_free()
 * called and then assigned to a transaction group.
 */
static int __osd_object_punch(objset_t *os, dmu_buf_t *db, dmu_tx_t *tx,
                              uint64_t size, uint64_t off, uint64_t len)
{
        int rc = 0;

        /* Assert that the transaction has been assigned to a
           transaction group. */
        LASSERT(tx->tx_txg != 0);
        /*
         * Nothing to do if the file is already at the desired length.
         */
        if (len == DMU_OBJECT_END && size == off)
                return 0;

        if (off < size)
                rc = -dmu_free_range(os, db->db_object, off, len, tx);

        return rc;
}

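/*
 * Punch [start, end) out of the object.  A punch reaching the current
 * size (or OBD_OBJECT_EOF) is a truncate and also updates the size SA.
 */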
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
                     __u64 start, __u64 end, struct thandle *th,
                     struct lustre_capa *capa)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        udmu_objset_t      *uos = &osd->od_objset;
        struct osd_thandle *oh;
        __u64               len;
        int                 rc = 0;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));

        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);

        write_lock(&obj->oo_attr_lock);
        /* truncate */
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;
        write_unlock(&obj->oo_attr_lock);

        rc = __osd_object_punch(osd->od_objset.os, obj->oo_db, oh->ot_tx,
                                obj->oo_attr.la_size, start, len);
        /* set new size */
        if (len == DMU_OBJECT_END) {
                write_lock(&obj->oo_attr_lock);
                obj->oo_attr.la_size = start;
                write_unlock(&obj->oo_attr_lock);
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                          &obj->oo_attr.la_size, 8, oh);
        }
        RETURN(rc);
}

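/* declare the blocks to free and the SA size update for a punch */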
static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
                             __u64 start, __u64 end, struct thandle *handle)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        __u64               len;
        ENTRY;

        oh = container_of0(handle, struct osd_thandle, ot_super);

        read_lock(&obj->oo_attr_lock);
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;

        /* declare we'll free some blocks ... */
        if (start < obj->oo_attr.la_size) {
                read_unlock(&obj->oo_attr_lock);
                dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object, start, len);
        } else {
                read_unlock(&obj->oo_attr_lock);
        }

        /* ... and we'll modify the size attribute */
        dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

        RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                                 obj->oo_attr.la_gid, 0, oh, true, NULL,
                                 false));
}

struct dt_body_operations osd_body_ops = {
        .dbo_read                 = osd_read,
        .dbo_declare_write        = osd_declare_write,
        .dbo_write                = osd_write,
        .dbo_bufs_get             = osd_bufs_get,
        .dbo_bufs_put             = osd_bufs_put,
        .dbo_write_prep           = osd_write_prep,
        .dbo_declare_write_commit = osd_declare_write_commit,
        .dbo_write_commit         = osd_write_commit,
        .dbo_read_prep            = osd_read_prep,
        .do_declare_punch         = osd_declare_punch,
        .do_punch                 = osd_punch,
};