/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_io.c
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSD

#include <lustre_ver.h>
#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lustre/lustre_idl.h>	/* LLOG_MIN_CHUNK_SIZE definition */

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>

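/* tag identifying the DMU buffer holds taken by this code (see
 * dmu_buf_hold_array_by_bonus() / dmu_buf_rele() below) so that every
 * zerocopy reference can be tracked and released exactly once */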
static char *osd_0copy_tag = "zerocopy";

static void record_start_io(struct osd_device *osd, int rw, int discont_pages)
{
	struct obd_histogram *h = osd->od_brw_stats.hist;

	if (rw == READ) {
		atomic_inc(&osd->od_r_in_flight);
		lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
				 atomic_read(&osd->od_r_in_flight));
		lprocfs_oh_tally(&h[BRW_R_DISCONT_PAGES], discont_pages);
	} else {
		atomic_inc(&osd->od_w_in_flight);
		lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
				 atomic_read(&osd->od_w_in_flight));
		lprocfs_oh_tally(&h[BRW_W_DISCONT_PAGES], discont_pages);
	}
}
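/* Note: the BRW_R_* and BRW_W_* histograms updated here and in
 * record_end_io() below feed the per-device brw_stats shown via
 * procfs. */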
static void record_end_io(struct osd_device *osd, int rw,
			  unsigned long elapsed, int disksize, int npages)
{
	struct obd_histogram *h = osd->od_brw_stats.hist;

	if (rw == READ) {
		atomic_dec(&osd->od_r_in_flight);
		lprocfs_oh_tally_log2(&h[BRW_R_PAGES], npages);
		if (disksize > 0)
			lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], disksize);
		if (elapsed)
			lprocfs_oh_tally_log2(&h[BRW_R_IO_TIME], elapsed);
	} else {
		atomic_dec(&osd->od_w_in_flight);
		lprocfs_oh_tally_log2(&h[BRW_W_PAGES], npages);
		if (disksize > 0)
			lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], disksize);
		if (elapsed)
			lprocfs_oh_tally_log2(&h[BRW_W_IO_TIME], elapsed);
	}
}
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
			struct lu_buf *buf, loff_t *pos)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	uint64_t old_size;
	int size = buf->lb_len;
	unsigned long start;
	int rc;

	LASSERT(dt_object_exists(dt));

	start = cfs_time_current();

	read_lock(&obj->oo_attr_lock);
	old_size = obj->oo_attr.la_size;
	read_unlock(&obj->oo_attr_lock);

	/* clip the read to the current object size */
	if (*pos + size > old_size) {
		if (old_size < *pos)
			return 0;
		else
			size = old_size - *pos;
	}

	record_start_io(osd, READ, 0);
	rc = osd_dmu_read(osd, obj->oo_dn, *pos, size, buf->lb_buf,
			  DMU_READ_PREFETCH);
	record_end_io(osd, READ, cfs_time_current() - start, size,
		      size >> PAGE_SHIFT);
	if (rc == 0) {
		rc = size;
		*pos += size;
	}
	return rc;
}
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
				 const struct lu_buf *buf, loff_t pos,
				 struct thandle *th)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            oid;
	ENTRY;

	oh = container_of0(th, struct osd_thandle, ot_super);

	/* in some cases declare can race with creation (e.g. llog)
	 * and we need to wait till the object is initialized. notice
	 * LOHA_EXISTS is supposed to be the last step in the
	 * initialization */

	/* size change (in dnode) will be declared by dmu_tx_hold_write() */
	if (dt_object_exists(dt))
		oid = obj->oo_dn->dn_object;
	else
		oid = DMU_NEW_OBJECT;

	/* XXX: we still lack append declaration support in ZFS;
	 * -1 means append, which is used mostly by llog, and llog
	 * can grow up to LLOG_MIN_CHUNK_SIZE * 8 records */
	if (pos == -1)
		pos = max_t(loff_t, 256 * 8 * LLOG_MIN_CHUNK_SIZE,
			    obj->oo_attr.la_size + (2 << 20));
	osd_tx_hold_write(oh->ot_tx, oid, obj->oo_dn, pos, buf->lb_len);

	/* dt_declare_write() is usually called for system objects, such
	 * as llog or last_rcvd files. We needn't enforce quota on those
	 * objects, so always set the lqi_space as 0. */
	RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
				 obj->oo_attr.la_gid, 0, oh, true, NULL,
				 false));
}
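/* Note: assuming the common LLOG_MIN_CHUNK_SIZE of 8KiB, the append
 * reservation above covers at least 256 * 8 * 8KiB = 16MiB, or the
 * current size plus 2MiB ((2 << 20)), whichever is larger. */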
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
			 const struct lu_buf *buf, loff_t *pos,
			 struct thandle *th, int ignore_quota)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            offset = *pos;
	int                 rc;
	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	osd_dmu_write(osd, obj->oo_dn, offset, (uint64_t)buf->lb_len,
		      buf->lb_buf, oh->ot_tx);
	write_lock(&obj->oo_attr_lock);
	if (obj->oo_attr.la_size < offset + buf->lb_len) {
		obj->oo_attr.la_size = offset + buf->lb_len;
		write_unlock(&obj->oo_attr_lock);
		/* osd_object_sa_update() will be copying directly from oo_attr
		 * into dbuf. any update within a single txg will copy the
		 * most recent value */
		rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
					  &obj->oo_attr.la_size, 8, oh);
		if (unlikely(rc))
			GOTO(out, rc);
	} else {
		write_unlock(&obj->oo_attr_lock);
	}

	*pos += buf->lb_len;
	rc = buf->lb_len;

out:
	RETURN(rc);
}
/*
 * XXX: for the moment I don't want to use lnb_flags for osd-internal
 * purposes as it's not very well defined ...
 * instead I use the lowest bit of the address stored in lnb_data:
 * arc buffer:  .lnb_data = abuf           (ARC buffer we loan for write)
 * dbuf buffer: .lnb_data = dbuf | 1       (dbuf we get for read)
 * copy buffer: .lnb_page->mapping = obj   (page we allocate for copy-write)
 */
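/*
 * A minimal sketch of the tagging convention above (illustrative only;
 * osd_bufs_get_read() and osd_bufs_put() open-code the same arithmetic):
 *
 *	dbf = (void *)((unsigned long)dbp | 1);     mark pointer as a dbuf
 *	ptr = (unsigned long)lnb_data;
 *	if (ptr & 1UL)
 *		dmu_buf_rele((void *)(ptr & ~1UL), osd_0copy_tag);
 *	else if (lnb_data != NULL)
 *		dmu_return_arcbuf(lnb_data);        arcbuf: return the loan
 */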
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
			struct niobuf_local *lnb, int npages)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	unsigned long      ptr;
	int                i;

	LASSERT(dt_object_exists(dt));

	for (i = 0; i < npages; i++) {
		if (lnb[i].lnb_page == NULL)
			continue;
		if (lnb[i].lnb_page->mapping == (void *)obj) {
			/* this is anonymous page allocated for copy-write */
			lnb[i].lnb_page->mapping = NULL;
			__free_page(lnb[i].lnb_page);
			atomic_dec(&osd->od_zerocopy_alloc);
		} else {
			/* see comment in osd_bufs_get_read() */
			ptr = (unsigned long)lnb[i].lnb_data;
			if (ptr & 1UL) {
				ptr &= ~1UL;
				dmu_buf_rele((void *)ptr, osd_0copy_tag);
				atomic_dec(&osd->od_zerocopy_pin);
			} else if (lnb[i].lnb_data != NULL) {
				dmu_return_arcbuf(lnb[i].lnb_data);
				atomic_dec(&osd->od_zerocopy_loan);
			}
		}
		lnb[i].lnb_page = NULL;
		lnb[i].lnb_data = NULL;
	}

	return 0;
}
static inline struct page *kmem_to_page(void *addr)
{
	LASSERT(!((unsigned long)addr & ~PAGE_MASK));
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}
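/* Note: an ARC buffer's backing memory may be vmalloc'd or come from
 * the kernel's linear mapping depending on how ZFS allocated it, hence
 * the two conversion paths above. */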
/**
 * Prepare buffers for read.
 *
 * The function maps the range described by \a off and \a len to the \a lnb
 * array. dmu_buf_hold_array_by_bonus() finds/creates appropriate ARC buffers,
 * then we fill the \a lnb array with the pages storing those ARC buffers.
 * Notice the current implementation passes TRUE to
 * dmu_buf_hold_array_by_bonus() to fill the ARC buffers with actual data, so
 * the I/O is done in the context of osd_bufs_get_read(). A better
 * implementation would just return the buffers (potentially unfilled) and a
 * subsequent osd_read_prep() would do the I/O for many ranges concurrently.
 *
 * \param[in] env	environment
 * \param[in] obj	object
 * \param[in] off	offset in bytes
 * \param[in] len	the number of bytes to access
 * \param[out] lnb	array of local niobufs pointing to the buffers with data
 *
 * \retval		number of pages mapped for success
 * \retval		negative error number on failure
 */
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
			     loff_t off, ssize_t len, struct niobuf_local *lnb)
{
	struct osd_device *osd = osd_obj2dev(obj);
	unsigned long      start = cfs_time_current();
	int                rc, i, numbufs, npages = 0;
	dmu_buf_t        **dbp;
	ENTRY;

	record_start_io(osd, READ, 0);

	/* grab buffers for read:
	 * the OSD API lets us grab the buffers first, then initiate the IO(s)
	 * so that all required IOs will be done in parallel, but at the
	 * moment the DMU doesn't provide us with a method to grab buffers.
	 * If we discover this is vital for good performance we
	 * can get our own replacement for dmu_buf_hold_array_by_bonus(). */
	while (len > 0) {
		rc = -dmu_buf_hold_array_by_bonus(&obj->oo_dn->dn_bonus->db,
						  off, len, TRUE, osd_0copy_tag,
						  &numbufs, &dbp);
		if (unlikely(rc))
			GOTO(err, rc);

		for (i = 0; i < numbufs; i++) {
			int bufoff, tocpy, thispage;
			void *dbf;

			atomic_inc(&osd->od_zerocopy_pin);

			bufoff = off - dbp[i]->db_offset;
			tocpy = min_t(int, dbp[i]->db_size - bufoff, len);

			/* kind of trick to differentiate dbuf vs. arcbuf */
			LASSERT(((unsigned long)dbp[i] & 1) == 0);
			dbf = (void *)((unsigned long)dbp[i] | 1);

			while (tocpy > 0) {
				thispage = PAGE_SIZE;
				thispage -= bufoff & (PAGE_SIZE - 1);
				thispage = min(tocpy, thispage);

				lnb->lnb_file_offset = off;
				lnb->lnb_page_offset = bufoff & ~PAGE_MASK;
				lnb->lnb_len = thispage;
				lnb->lnb_page = kmem_to_page(dbp[i]->db_data +
							     bufoff);
				/* mark just a single slot: we need this
				 * reference to dbuf to be released once */
				lnb->lnb_data = dbf;
				dbf = NULL;

				tocpy -= thispage;
				len -= thispage;
				bufoff += thispage;
				off += thispage;

				npages++;
				lnb++;
			}

			/* steal dbuf so dmu_buf_rele_array() can't release
			 * it */
			dbp[i] = NULL;
		}

		dmu_buf_rele_array(dbp, numbufs, osd_0copy_tag);
	}

	record_end_io(osd, READ, cfs_time_current() - start,
		      npages * PAGE_SIZE, npages);

	RETURN(npages);

err:
	LASSERT(rc < 0);
	osd_bufs_put(env, &obj->oo_dt, lnb - npages, npages);
	RETURN(rc);
}
static inline arc_buf_t *osd_request_arcbuf(dnode_t *dn, size_t bs)
{
	arc_buf_t *abuf;

	abuf = dmu_request_arcbuf(&dn->dn_bonus->db, bs);
	if (unlikely(!abuf))
		return ERR_PTR(-ENOMEM);

#if ZFS_VERSION_CODE < OBD_OCD_VERSION(0, 7, 0, 0)
	/* ZFS prior to 0.7.0 doesn't guarantee PAGE_SIZE alignment for zio
	 * blocks smaller than (PAGE_SIZE << 2). This poses a problem of
	 * setting up the page array for RDMA transfer. See LU-9305. */
	if ((unsigned long)abuf->b_data & ~PAGE_MASK) {
		dmu_return_arcbuf(abuf);
		return NULL;
	}
#endif

	return abuf;
}
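/* Note the two failure modes above: allocation failure returns
 * ERR_PTR(-ENOMEM), while a buffer too unaligned for RDMA returns NULL
 * so osd_bufs_get_write() can quietly fall back to the copy path. */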
static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
			      loff_t off, ssize_t len, struct niobuf_local *lnb)
{
	struct osd_device *osd = osd_obj2dev(obj);
	int                plen, off_in_block, sz_in_block;
	int                rc, i = 0, npages = 0;
	dnode_t           *dn = obj->oo_dn;
	arc_buf_t         *abuf;
	uint32_t           bs = dn->dn_datablksz;
	ENTRY;

	/*
	 * currently only full blocks are subject to the zerocopy approach:
	 * so that we're sure nobody is trying to update the same block
	 */
	while (len > 0) {
		LASSERT(npages < PTLRPC_MAX_BRW_PAGES);

		off_in_block = off & (bs - 1);
		sz_in_block = min_t(int, bs - off_in_block, len);

		abuf = NULL;
		if (sz_in_block == bs) {
			/* full block, try to use zerocopy */
			abuf = osd_request_arcbuf(dn, bs);
			if (unlikely(IS_ERR(abuf)))
				GOTO(out_err, rc = PTR_ERR(abuf));
		}

		if (abuf != NULL) {
			atomic_inc(&osd->od_zerocopy_loan);

			/* go over the pages arcbuf contains, put them as
			 * local niobufs for ptlrpc's bulks */
			while (sz_in_block > 0) {
				plen = min_t(int, sz_in_block, PAGE_SIZE);

				lnb[i].lnb_file_offset = off;
				lnb[i].lnb_page_offset = 0;
				lnb[i].lnb_len = plen;
				if (sz_in_block == bs)
					lnb[i].lnb_data = abuf;
				else
					lnb[i].lnb_data = NULL;

				/* this one is not supposed to fail */
				lnb[i].lnb_page = kmem_to_page(abuf->b_data +
							       off_in_block);
				LASSERT(lnb[i].lnb_page);

				lprocfs_counter_add(osd->od_stats,
						    LPROC_OSD_ZEROCOPY_IO, 1);

				sz_in_block -= plen;
				len -= plen;
				off += plen;
				off_in_block += plen;
				i++;
				npages++;
			}
		} else {
			if (off_in_block == 0 && len < bs &&
			    off + len >= obj->oo_attr.la_size)
				lprocfs_counter_add(osd->od_stats,
						    LPROC_OSD_TAIL_IO, 1);

			/* can't use zerocopy, allocate temp. buffers */
			while (sz_in_block > 0) {
				plen = min_t(int, sz_in_block, PAGE_SIZE);

				lnb[i].lnb_file_offset = off;
				lnb[i].lnb_page_offset = 0;
				lnb[i].lnb_len = plen;
				lnb[i].lnb_data = NULL;

				lnb[i].lnb_page = alloc_page(OSD_GFP_IO);
				if (unlikely(lnb[i].lnb_page == NULL))
					GOTO(out_err, rc = -ENOMEM);

				LASSERT(lnb[i].lnb_page->mapping == NULL);
				lnb[i].lnb_page->mapping = (void *)obj;

				atomic_inc(&osd->od_zerocopy_alloc);
				lprocfs_counter_add(osd->od_stats,
						    LPROC_OSD_COPY_IO, 1);

				sz_in_block -= plen;
				len -= plen;
				off += plen;
				i++;
				npages++;
			}
		}
	}

	RETURN(npages);

out_err:
	osd_bufs_put(env, &obj->oo_dt, lnb, npages);
	RETURN(rc);
}
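/* In short: block-aligned, full-block chunks borrow ARC buffers that
 * the bulk transfer fills directly (zerocopy), while partial blocks get
 * temporary pages, tagged via lnb_page->mapping, that osd_write_commit()
 * later copies in with osd_dmu_write(). */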
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
			loff_t offset, ssize_t len, struct niobuf_local *lnb,
			enum dt_bufs_type rw)
{
	struct osd_object *obj = osd_dt_obj(dt);
	int                rc;

	LASSERT(dt_object_exists(dt));

	if ((rw & DT_BUFS_TYPE_WRITE) == 0)
		rc = osd_bufs_get_read(env, obj, offset, len, lnb);
	else
		rc = osd_bufs_get_write(env, obj, offset, len, lnb);

	return rc;
}
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
			  struct niobuf_local *lnb, int npages)
{
	struct osd_object *obj = osd_dt_obj(dt);

	LASSERT(dt_object_exists(dt));

	/* nothing to do here: the write path does its work in
	 * osd_write_commit() */
	return 0;
}
static inline uint64_t osd_roundup2blocksz(uint64_t size,
					   uint64_t offset,
					   uint32_t blksz)
{
	LASSERT(blksz > 0);

	size += offset % blksz;

	if (likely(is_power_of_2(blksz)))
		return PO2_ROUNDUP_TYPED(size, blksz, uint64_t);

	size += blksz - 1;
	do_div(size, blksz);
	return size * blksz;
}
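/* Example: a 3000-byte write at offset 500 with an 8KiB block size
 * touches only the first block, so it books one full block:
 * (3000 + 500 % 8192) rounded up to 8192. */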
static int osd_declare_write_commit(const struct lu_env *env,
				    struct dt_object *dt,
				    struct niobuf_local *lnb, int npages,
				    struct thandle *th)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            space = 0, size = 0;
	uint64_t            offset = 0;
	uint32_t            blksz = obj->oo_dn->dn_datablksz;
	int                 i, rc, flags = 0;
	bool                ignore_quota = false, synced = false;
	struct page        *last_page = NULL;
	unsigned long       discont_pages = 0;
	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(lnb);
	LASSERT(npages > 0);

	oh = container_of0(th, struct osd_thandle, ot_super);

	for (i = 0; i < npages; i++) {
		if (last_page && lnb[i].lnb_page->index != (last_page->index + 1))
			++discont_pages;
		last_page = lnb[i].lnb_page;
		if (lnb[i].lnb_rc)
			/* ENOSPC, network RPC error, etc.
			 * We don't want to book space for pages which will be
			 * skipped in osd_write_commit(). Hence we skip pages
			 * with lnb_rc != 0 here too */
			continue;
		/* ignore quota for the whole request if any page is from
		 * client cache or written by root.
		 *
		 * XXX once we drop the 1.8 client support, the checking
		 * for whether page is from cache can be simplified as:
		 * !(lnb[i].flags & OBD_BRW_SYNC)
		 *
		 * XXX we could handle this on per-lnb basis as done by
		 * grant. */
		if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
		    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
		    OBD_BRW_FROM_GRANT)
			ignore_quota = true;
		if (size == 0) {
			/* first valid lnb */
			offset = lnb[i].lnb_file_offset;
			size = lnb[i].lnb_len;
			continue;
		}
		if (offset + size == lnb[i].lnb_file_offset) {
			/* this lnb is contiguous to the previous one */
			size += lnb[i].lnb_len;
			continue;
		}

		osd_tx_hold_write(oh->ot_tx, obj->oo_dn->dn_object,
				  obj->oo_dn, offset, size);
		/* Estimating space to be consumed by a write is rather
		 * complicated with ZFS. As a consequence, we don't account for
		 * indirect blocks and just use as a rough estimate the worst
		 * case where the old space is being held by a snapshot. Quota
		 * overrun will be adjusted once the operation is committed, if
		 * required. */
		space += osd_roundup2blocksz(size, offset, blksz);

		offset = lnb[i].lnb_file_offset;
		size = lnb[i].lnb_len;
	}

	if (size) {
		osd_tx_hold_write(oh->ot_tx, obj->oo_dn->dn_object, obj->oo_dn,
				  offset, size);
		space += osd_roundup2blocksz(size, offset, blksz);
	}

	oh->ot_write_commit = 1; /* used in osd_trans_start() for fail_loc */

	/* backend zfs filesystem might be configured to store multiple data
	 * copies */
	space *= osd->od_os->os_copies;
	space = toqb(space);
	CDEBUG(D_QUOTA, "writing %d pages, reserving %lldK of quota space\n",
	       npages, space);

	record_start_io(osd, WRITE, discont_pages);

retry:
	/* acquire quota space if needed */
	rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
			       obj->oo_attr.la_gid, space, oh, true, &flags,
			       ignore_quota);

	if (!synced && rc == -EDQUOT && (flags & QUOTA_FL_SYNC) != 0) {
		dt_sync(env, th->th_dev);
		synced = true;
		CDEBUG(D_QUOTA, "retry after sync\n");
		flags = 0;
		goto retry;
	}

	/* we need only to store the overquota flags in the first lnb for
	 * now, once we support multiple-object BRW, this code needs to be
	 * revised. */
	if (flags & QUOTA_FL_OVER_USRQUOTA)
		lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
	if (flags & QUOTA_FL_OVER_GRPQUOTA)
		lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;

	RETURN(rc);
}
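/* Note: when the first attempt returns -EDQUOT and the quota code sets
 * QUOTA_FL_SYNC, the declaration above forces one dt_sync() so that
 * space freed by committed-but-unsynced transactions becomes visible,
 * then retries exactly once before reporting EDQUOT. */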
/*
 * Policy to grow the ZFS block size by write pattern.
 * For sequential writes, it grows the block size gradually until it reaches
 * the maximum blocksize the dataset can support. Otherwise, it picks a
 * block size based on the region this I/O writes to.
 */
static int osd_grow_blocksize(struct osd_object *obj, struct osd_thandle *oh,
			      uint64_t start, uint64_t end)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dnode_t           *dn = obj->oo_dn;
	uint32_t           blksz;
	int                rc = 0;
	ENTRY;

	if (dn->dn_maxblkid > 0) /* can't change block size */
		GOTO(out, rc);

	if (dn->dn_datablksz >= osd->od_max_blksz)
		GOTO(out, rc);

	down_write(&obj->oo_guard);

	blksz = dn->dn_datablksz;
	if (blksz >= osd->od_max_blksz) /* check again after grabbing lock */
		GOTO(out_unlock, rc);

	/* now ZFS can support up to 16MB block size, and if the write
	 * is sequential, it just increases the block size gradually */
	if (start <= blksz) { /* sequential */
		blksz = (uint32_t)min_t(uint64_t, osd->od_max_blksz, end);
	} else { /* sparse, pick a block size by write region */
		blksz = (uint32_t)min_t(uint64_t, osd->od_max_blksz,
					end - start);
	}

	if (!is_power_of_2(blksz))
		blksz = size_roundup_power2(blksz);

	if (blksz > dn->dn_datablksz) {
		rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
					       blksz, 0, oh->ot_tx);
		LASSERT(ergo(rc == 0, dn->dn_datablksz >= blksz));
		if (rc < 0)
			CDEBUG(D_INODE, "object "DFID": change block size "
			       "%u -> %u error rc = %d\n",
			       PFID(lu_object_fid(&obj->oo_dt.do_lu)),
			       dn->dn_datablksz, blksz, rc);
	}
	EXIT;
out_unlock:
	up_write(&obj->oo_guard);
out:
	return rc;
}
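/* Example: an empty object with a 128KiB block size receiving a
 * sequential 1MiB write (start = 0, end = 1MiB) grows straight to
 * min(od_max_blksz, 1MiB), while a sparse write sizes the block by the
 * written region (end - start); both are rounded up to a power of 2. */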
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
			    struct niobuf_local *lnb, int npages,
			    struct thandle *th)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            new_size = 0;
	int                 i, rc = 0;
	unsigned long       iosize = 0;
	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	/* adjust block size. Assume the buffers are sorted. */
	(void)osd_grow_blocksize(obj, oh, lnb[0].lnb_file_offset,
				 lnb[npages - 1].lnb_file_offset +
				 lnb[npages - 1].lnb_len);

	/* LU-8791: take oo_guard to avoid the deadlock that changing block
	 * size and assigning arcbuf take place at the same time.
	 *
	 * Thread 1:
	 * osd_write_commit()
	 *  -> osd_grow_blocksize() with osd_object::oo_guard held
	 *   -> dmu_object_set_blocksize()
	 *    -> dnode_set_blksz(), with dnode_t::dn_struct_rwlock
	 *       write lock held
	 *     -> dbuf_new_size()
	 *      -> dmu_buf_will_dirty()
	 *       -> dbuf_read()
	 *        -> wait for the dbuf state to change
	 * Thread 2:
	 * osd_write_commit()
	 *  -> dmu_assign_arcbuf()
	 *   -> dbuf_assign_arcbuf(), set dbuf state to DB_FILL
	 *    -> dbuf_dirty()
	 *     -> try to hold the read lock of dnode_t::dn_struct_rwlock
	 *
	 * By taking the read lock, we prevent thread 2 from entering the
	 * critical section of assigning the arcbuf while thread 1 is
	 * changing the block size.
	 */
	down_read(&obj->oo_guard);
	for (i = 0; i < npages; i++) {
		CDEBUG(D_INODE, "write %u bytes at %u\n",
		       (unsigned) lnb[i].lnb_len,
		       (unsigned) lnb[i].lnb_file_offset);

		if (lnb[i].lnb_rc) {
			/* ENOSPC, network RPC error, etc.
			 * Unlike ldiskfs, zfs allocates new blocks on rewrite,
			 * so we skip this page if lnb_rc is set to -ENOSPC */
			CDEBUG(D_INODE, "obj "DFID": skipping lnb[%u]: rc=%d\n",
			       PFID(lu_object_fid(&dt->do_lu)), i,
			       lnb[i].lnb_rc);
			continue;
		}

		if (lnb[i].lnb_page->mapping == (void *)obj) {
			osd_dmu_write(osd, obj->oo_dn, lnb[i].lnb_file_offset,
				      lnb[i].lnb_len, kmap(lnb[i].lnb_page),
				      oh->ot_tx);
			kunmap(lnb[i].lnb_page);
		} else if (lnb[i].lnb_data) {
			LASSERT(((unsigned long)lnb[i].lnb_data & 1) == 0);
			/* buffer loaned for zerocopy, try to use it.
			 * notice that dmu_assign_arcbuf() is smart
			 * enough to recognize a changed blocksize,
			 * in which case it falls back to dmu_write() */
			dmu_assign_arcbuf(&obj->oo_dn->dn_bonus->db,
					  lnb[i].lnb_file_offset,
					  lnb[i].lnb_data, oh->ot_tx);
			/* drop the reference, otherwise osd_put_bufs()
			 * will be releasing it - bad! */
			lnb[i].lnb_data = NULL;
			atomic_dec(&osd->od_zerocopy_loan);
		}

		if (new_size < lnb[i].lnb_file_offset + lnb[i].lnb_len)
			new_size = lnb[i].lnb_file_offset + lnb[i].lnb_len;
		iosize += lnb[i].lnb_len;
	}
	up_read(&obj->oo_guard);

	if (unlikely(new_size == 0)) {
		/* no pages to write, no transno is needed */
		th->th_local = 1;
		/* it is important to return 0 even when all lnb_rc == -ENOSPC
		 * since ofd_commitrw_write() retries several times on ENOSPC */
		record_end_io(osd, WRITE, 0, 0, 0);
		RETURN(0);
	}

	write_lock(&obj->oo_attr_lock);
	if (obj->oo_attr.la_size < new_size) {
		obj->oo_attr.la_size = new_size;
		write_unlock(&obj->oo_attr_lock);
		/* osd_object_sa_update() will be copying directly from
		 * oo_attr into dbuf. any update within a single txg will copy
		 * the most recent value */
		rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
					  &obj->oo_attr.la_size, 8, oh);
	} else {
		write_unlock(&obj->oo_attr_lock);
	}

	record_end_io(osd, WRITE, 0, iosize, npages);

	RETURN(rc);
}
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
			 struct niobuf_local *lnb, int npages)
{
	struct osd_object *obj = osd_dt_obj(dt);
	int                i;
	loff_t             eof;

	LASSERT(dt_object_exists(dt));

	read_lock(&obj->oo_attr_lock);
	eof = obj->oo_attr.la_size;
	read_unlock(&obj->oo_attr_lock);

	for (i = 0; i < npages; i++) {
		if (unlikely(lnb[i].lnb_rc < 0))
			continue;

		lnb[i].lnb_rc = lnb[i].lnb_len;

		if (lnb[i].lnb_file_offset + lnb[i].lnb_len >= eof) {
			if (eof <= lnb[i].lnb_file_offset)
				lnb[i].lnb_rc = 0;
			else
				lnb[i].lnb_rc = eof - lnb[i].lnb_file_offset;
			/* all subsequent rc should be 0 */
			while (++i < npages)
				lnb[i].lnb_rc = 0;
			break;
		}
	}

	return 0;
}
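/* Note: after this, lnb_rc holds the count of valid bytes per page.
 * E.g. with eof = 10000 and 4KiB pages, the pages at offsets 0 and 4096
 * get lnb_rc = 4096, the page at 8192 gets 1808, and later pages 0. */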
/*
 * Punch/truncate an object
 *
 *	IN:	db  - dmu_buf of the object to free data in.
 *		off - start of section to free.
 *		len - length of section to free (DMU_OBJECT_END => to EOF).
 *
 *	RETURN:	0 on success
 *		error code on failure
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_sa() and, if off < size, dmu_tx_hold_free()
 * called and then assigned to a transaction group.
 */
static int __osd_object_punch(objset_t *os, dnode_t *dn, dmu_tx_t *tx,
			      uint64_t size, uint64_t off, uint64_t len)
{
	int rc = 0;

	/* Assert that the transaction has been assigned to a
	 * transaction group. */
	LASSERT(tx->tx_txg != 0);
	/*
	 * Nothing to do if file already at desired length.
	 */
	if (len == DMU_OBJECT_END && size == off)
		return 0;

	/* XXX: dnode_free_range() can be used to save on dnode lookup */
	if (off < size)
		dmu_free_range(os, dn->dn_object, off, len, tx);

	return rc;
}
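/* Note: passing len == DMU_OBJECT_END to dmu_free_range() frees
 * everything from off to the end of the object, i.e. a truncate; a
 * hole-punch inside the file passes an explicit length instead. */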
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
		     __u64 start, __u64 end, struct thandle *th)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	__u64               len;
	int                 rc = 0;
	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	write_lock(&obj->oo_attr_lock);
	/* truncate */
	if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
		len = DMU_OBJECT_END;
	else
		len = end - start;
	write_unlock(&obj->oo_attr_lock);

	rc = __osd_object_punch(osd->od_os, obj->oo_dn, oh->ot_tx,
				obj->oo_attr.la_size, start, len);

	/* set new size */
	if (len == DMU_OBJECT_END) {
		write_lock(&obj->oo_attr_lock);
		obj->oo_attr.la_size = start;
		write_unlock(&obj->oo_attr_lock);
		rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
					  &obj->oo_attr.la_size, 8, oh);
	}

	RETURN(rc);
}
static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
			     __u64 start, __u64 end, struct thandle *handle)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	__u64               len;
	ENTRY;

	oh = container_of0(handle, struct osd_thandle, ot_super);

	read_lock(&obj->oo_attr_lock);
	if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
		len = DMU_OBJECT_END;
	else
		len = end - start;

	/* declare we'll free some blocks ... */
	if (start < obj->oo_attr.la_size) {
		read_unlock(&obj->oo_attr_lock);
		dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object, start, len);
	} else {
		read_unlock(&obj->oo_attr_lock);
	}

	RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
				 obj->oo_attr.la_gid, 0, oh, true, NULL,
				 false));
}
static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
		       __u64 start, __u64 end, enum lu_ladvise_type advice)
{
	int rc;
	ENTRY;

	switch (advice) {
	default:
		rc = -ENOTSUPP;
		break;
	}

	RETURN(rc);
}
struct dt_body_operations osd_body_ops = {
	.dbo_read			= osd_read,
	.dbo_declare_write		= osd_declare_write,
	.dbo_write			= osd_write,
	.dbo_bufs_get			= osd_bufs_get,
	.dbo_bufs_put			= osd_bufs_put,
	.dbo_write_prep			= osd_write_prep,
	.dbo_declare_write_commit	= osd_declare_write_commit,
	.dbo_write_commit		= osd_write_commit,
	.dbo_read_prep			= osd_read_prep,
	.dbo_declare_punch		= osd_declare_punch,
	.dbo_punch			= osd_punch,
	.dbo_ladvise			= osd_ladvise,
};