/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_io.c
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSD

#include <lustre_ver.h>
#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lustre/lustre_idl.h>	/* LLOG_MIN_CHUNK_SIZE definition */

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
#include <sys/txg.h>
static char *osd_zerocopy_tag = "zerocopy";
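
/* Bump the in-flight counter for the given direction and feed the RPC
 * concurrency and page-discontiguity histograms exported via brw_stats. */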
static void record_start_io(struct osd_device *osd, int rw, int discont_pages)
{
	struct obd_histogram *h = osd->od_brw_stats.hist;

	if (rw == READ) {
		atomic_inc(&osd->od_r_in_flight);
		lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
				 atomic_read(&osd->od_r_in_flight));
		lprocfs_oh_tally(&h[BRW_R_DISCONT_PAGES], discont_pages);
	} else {
		atomic_inc(&osd->od_w_in_flight);
		lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
				 atomic_read(&osd->od_w_in_flight));
		lprocfs_oh_tally(&h[BRW_W_DISCONT_PAGES], discont_pages);
	}
}
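
/* Counterpart of record_start_io(): drop the in-flight counter and tally
 * page count, disk I/O size and elapsed time in log2 histograms. */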
static void record_end_io(struct osd_device *osd, int rw,
			  unsigned long elapsed, int disksize, int npages)
{
	struct obd_histogram *h = osd->od_brw_stats.hist;

	if (rw == READ) {
		atomic_dec(&osd->od_r_in_flight);
		lprocfs_oh_tally_log2(&h[BRW_R_PAGES], npages);
		if (disksize > 0)
			lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], disksize);
		if (elapsed)
			lprocfs_oh_tally_log2(&h[BRW_R_IO_TIME], elapsed);
	} else {
		atomic_dec(&osd->od_w_in_flight);
		lprocfs_oh_tally_log2(&h[BRW_W_PAGES], npages);
		if (disksize > 0)
			lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], disksize);
		if (elapsed)
			lprocfs_oh_tally_log2(&h[BRW_W_IO_TIME], elapsed);
	}
}
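
/* Synchronous read through the DMU. The file size is sampled once under
 * oo_attr_lock so the read is clipped at EOF even if another thread is
 * extending the object concurrently. */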
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
			struct lu_buf *buf, loff_t *pos)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	uint64_t	   old_size;
	int		   size = buf->lb_len;
	int		   rc;
	unsigned long	   start;

	LASSERT(dt_object_exists(dt));
	LASSERT(obj->oo_db);

	start = cfs_time_current();

	read_lock(&obj->oo_attr_lock);
	old_size = obj->oo_attr.la_size;
	read_unlock(&obj->oo_attr_lock);

	if (*pos + size > old_size) {
		if (old_size < *pos)
			return 0;
		else
			size = old_size - *pos;
	}

	record_start_io(osd, READ, 0);

	rc = -dmu_read(osd->od_os, obj->oo_db->db_object, *pos, size,
		       buf->lb_buf, DMU_READ_PREFETCH);

	record_end_io(osd, READ, cfs_time_current() - start, size,
		      size >> PAGE_CACHE_SHIFT);
	if (rc == 0) {
		rc = size;
		*pos += size;
	}
	return rc;
}
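
/* Reserve transaction credits for a future osd_write(): a hold on the SA
 * (or on SA creation if the object does not exist yet), a hold on the
 * written range, and a quota reservation. */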
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
				 const struct lu_buf *buf, loff_t pos,
				 struct thandle *th)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            oid;
	ENTRY;

	oh = container_of0(th, struct osd_thandle, ot_super);

	/* in some cases declare can race with creation (e.g. llog)
	 * and we need to wait till object is initialized. notice
	 * LOHA_EXISTs is supposed to be the last step in the
	 * initialization */

	/* declare possible size change. notice we can't check
	 * current size here as another thread can change it */

	if (dt_object_exists(dt)) {
		LASSERT(obj->oo_db);
		oid = obj->oo_db->db_object;

		dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
	} else {
		oid = DMU_NEW_OBJECT;
		dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
	}

	/* XXX: we still lack append declaration support in ZFS.
	 *	pos == -1 means append, which is used mostly by llog;
	 *	llog can grow up to LLOG_MIN_CHUNK_SIZE * 8 records */
	if (pos == -1)
		pos = max_t(loff_t, 256 * 8 * LLOG_MIN_CHUNK_SIZE,
			    obj->oo_attr.la_size + (2 << 20));
	dmu_tx_hold_write(oh->ot_tx, oid, pos, buf->lb_len);

	/* dt_declare_write() is usually called for system objects, such
	 * as llog or last_rcvd files. We needn't enforce quota on those
	 * objects, so always set the lqi_space as 0. */
	RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
				 obj->oo_attr.la_gid, 0, oh, true, NULL,
				 false));
}
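
/* Synchronous write through the DMU, paired with osd_declare_write().
 * On success the cached size attribute is extended and flushed to the
 * SA, *pos is advanced, and the number of bytes written is returned. */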
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
			 const struct lu_buf *buf, loff_t *pos,
			 struct thandle *th, int ignore_quota)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            offset = *pos;
	int                 rc;
	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(obj->oo_db);

	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	record_start_io(osd, WRITE, 0);

	dmu_write(osd->od_os, obj->oo_db->db_object, offset,
		  (uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
	write_lock(&obj->oo_attr_lock);
	if (obj->oo_attr.la_size < offset + buf->lb_len) {
		obj->oo_attr.la_size = offset + buf->lb_len;
		write_unlock(&obj->oo_attr_lock);
		/* osd_object_sa_update() will be copying directly from oo_attr
		 * into dbuf. any update within a single txg will copy the
		 * most recent value */
		rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
					  &obj->oo_attr.la_size, 8, oh);
		if (unlikely(rc))
			GOTO(out, rc);
	} else {
		write_unlock(&obj->oo_attr_lock);
	}

	*pos += buf->lb_len;
	rc = buf->lb_len;

out:
	record_end_io(osd, WRITE, 0, buf->lb_len,
		      buf->lb_len >> PAGE_CACHE_SHIFT);

	RETURN(rc);
}
/*
 * XXX: for the moment I don't want to use lnb_flags for osd-internal
 *	purposes as it's not very well defined ...
 *	instead I use the lowest bit of the address so that:
 *	arc buffer:  .lnb_data = abuf		(arc we loan for write)
 *	dbuf buffer: .lnb_data = dbuf | 1	(dbuf we get for read)
 *	copy buffer: .lnb_page->mapping = obj	(page we allocate for write)
 */
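
/* Release whatever osd_bufs_get() attached to each niobuf: free pages
 * allocated for copy-write, drop pinned dbuf references, or return
 * loaned ARC buffers, following the tagging scheme described above. */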
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
			struct niobuf_local *lnb, int npages)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	unsigned long      ptr;
	int                i;

	LASSERT(dt_object_exists(dt));
	LASSERT(obj->oo_db);

	for (i = 0; i < npages; i++) {
		if (lnb[i].lnb_page == NULL)
			continue;

		if (lnb[i].lnb_page->mapping == (void *)obj) {
			/* this is anonymous page allocated for copy-write */
			lnb[i].lnb_page->mapping = NULL;
			__free_page(lnb[i].lnb_page);
			atomic_dec(&osd->od_zerocopy_alloc);
		} else {
			/* see comment in osd_bufs_get_read() */
			ptr = (unsigned long)lnb[i].lnb_data;
			if (ptr & 1UL) {
				ptr &= ~1UL;
				dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
				atomic_dec(&osd->od_zerocopy_pin);
			} else if (lnb[i].lnb_data != NULL) {
				dmu_return_arcbuf(lnb[i].lnb_data);
				atomic_dec(&osd->od_zerocopy_loan);
			}
		}
		lnb[i].lnb_page = NULL;
		lnb[i].lnb_data = NULL;
	}

	return 0;
}
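
/* Resolve a kernel virtual address to its backing struct page, handling
 * both vmalloc'ed and directly-mapped addresses. */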
static inline struct page *kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);

	return virt_to_page(addr);
}
/**
 * Prepare buffers for read.
 *
 * The function maps the range described by \a off and \a len to \a lnb array.
 * dmu_buf_hold_array_by_bonus() finds/creates appropriate ARC buffers, then
 * we fill \a lnb array with the pages storing ARC buffers. Notice the current
 * implementation passes TRUE to dmu_buf_hold_array_by_bonus() to fill ARC
 * buffers with actual data, so I/O is done in the context of
 * osd_bufs_get_read(). A better implementation would just return the buffers
 * (potentially unfilled) and a subsequent osd_read_prep() would do I/O for
 * many ranges concurrently.
 *
 * \param[in] env	environment
 * \param[in] obj	object
 * \param[in] off	offset in bytes
 * \param[in] len	the number of bytes to access
 * \param[out] lnb	array of local niobufs pointing to the buffers with data
 *
 * \retval		number of prepared buffers on success
 * \retval		negative error number on failure
 */
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
			     loff_t off, ssize_t len, struct niobuf_local *lnb)
{
	struct osd_device *osd = osd_obj2dev(obj);
	unsigned long      start = cfs_time_current();
	int                rc, i, numbufs, npages = 0;
	dmu_buf_t        **dbp;
	ENTRY;

	record_start_io(osd, READ, 0);

	/* grab buffers for read:
	 * OSD API lets us grab buffers first, then initiate IO(s)
	 * so that all required IOs will be done in parallel, but at the
	 * moment DMU doesn't provide us with a method to grab buffers.
	 * If we discover this is vital for good performance we
	 * can get our own replacement for dmu_buf_hold_array_by_bonus(). */
	while (len > 0) {
		rc = -dmu_buf_hold_array_by_bonus(obj->oo_db, off, len, TRUE,
						  osd_zerocopy_tag, &numbufs,
						  &dbp);
		if (unlikely(rc))
			GOTO(err, rc);

		for (i = 0; i < numbufs; i++) {
			int bufoff, tocpy, thispage;
			void *dbf = dbp[i];

			LASSERT(len > 0);

			atomic_inc(&osd->od_zerocopy_pin);

			bufoff = off - dbp[i]->db_offset;
			tocpy = min_t(int, dbp[i]->db_size - bufoff, len);

			/* kind of trick to differentiate dbuf vs. arcbuf */
			LASSERT(((unsigned long)dbp[i] & 1) == 0);
			dbf = (void *) ((unsigned long)dbp[i] | 1);

			while (tocpy > 0) {
				thispage = PAGE_CACHE_SIZE;
				thispage -= bufoff & (PAGE_CACHE_SIZE - 1);
				thispage = min(tocpy, thispage);

				lnb->lnb_rc = 0;
				lnb->lnb_file_offset = off;
				lnb->lnb_page_offset = bufoff & ~PAGE_MASK;
				lnb->lnb_len = thispage;
				lnb->lnb_page = kmem_to_page(dbp[i]->db_data +
							     bufoff);
				/* mark just a single slot: we need this
				 * reference to dbuf to be released once */
				lnb->lnb_data = dbf;
				dbf = NULL;

				tocpy -= thispage;
				len -= thispage;
				bufoff += thispage;
				off += thispage;

				npages++;
				lnb++;
			}

			/* steal dbuf so dmu_buf_rele_array() can't release
			 * it */
			dbp[i] = NULL;
		}

		dmu_buf_rele_array(dbp, numbufs, osd_zerocopy_tag);
	}

	record_end_io(osd, READ, cfs_time_current() - start,
		      npages * PAGE_SIZE, npages);

	RETURN(npages);

err:
	LASSERT(rc < 0);
	osd_bufs_put(env, &obj->oo_dt, lnb - npages, npages);
	RETURN(rc);
}
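
/**
 * Prepare buffers for write.
 *
 * Full blocks are loaned as ARC buffers (zerocopy path), so the data can
 * be assigned to the DMU without an extra copy in osd_write_commit();
 * partial blocks get temporary pages which are copied with dmu_write()
 * at commit time.
 */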
static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
			      loff_t off, ssize_t len, struct niobuf_local *lnb)
{
	struct osd_device *osd = osd_obj2dev(obj);
	int                plen, off_in_block, sz_in_block;
	int                rc, i = 0, npages = 0;
	arc_buf_t         *abuf;
	uint32_t           bs;
	uint64_t           dummy;
	ENTRY;

	dmu_object_size_from_db(obj->oo_db, &bs, &dummy);

	/*
	 * currently only full blocks are subject to the zerocopy approach:
	 * so that we're sure nobody is trying to update the same block
	 */
	while (len > 0) {
		LASSERT(npages < PTLRPC_MAX_BRW_PAGES);

		off_in_block = off & (bs - 1);
		sz_in_block = min_t(int, bs - off_in_block, len);

		if (sz_in_block == bs) {
			/* full block, try to use zerocopy */
			abuf = dmu_request_arcbuf(obj->oo_db, bs);
			if (unlikely(abuf == NULL))
				GOTO(out_err, rc = -ENOMEM);

			atomic_inc(&osd->od_zerocopy_loan);

			/* go over pages arcbuf contains, put them as
			 * local niobufs for ptlrpc's bulks */
			while (sz_in_block > 0) {
				plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);

				lnb[i].lnb_file_offset = off;
				lnb[i].lnb_page_offset = 0;
				lnb[i].lnb_len = plen;
				lnb[i].lnb_rc = 0;
				if (sz_in_block == bs)
					lnb[i].lnb_data = abuf;
				else
					lnb[i].lnb_data = NULL;

				/* this one is not supposed to fail */
				lnb[i].lnb_page = kmem_to_page(abuf->b_data +
							       off_in_block);
				LASSERT(lnb[i].lnb_page);

				lprocfs_counter_add(osd->od_stats,
						    LPROC_OSD_ZEROCOPY_IO, 1);

				sz_in_block -= plen;
				len -= plen;
				off += plen;
				off_in_block += plen;
				i++;
				npages++;
			}
		} else {
			if (off_in_block == 0 && len < bs &&
			    off + len >= obj->oo_attr.la_size)
				lprocfs_counter_add(osd->od_stats,
						    LPROC_OSD_TAIL_IO, 1);

			/* can't use zerocopy, allocate temp. buffers */
			while (sz_in_block > 0) {
				plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);

				lnb[i].lnb_file_offset = off;
				lnb[i].lnb_page_offset = 0;
				lnb[i].lnb_len = plen;
				lnb[i].lnb_rc = 0;
				lnb[i].lnb_data = NULL;

				lnb[i].lnb_page = alloc_page(OSD_GFP_IO);
				if (unlikely(lnb[i].lnb_page == NULL))
					GOTO(out_err, rc = -ENOMEM);

				LASSERT(lnb[i].lnb_page->mapping == NULL);
				lnb[i].lnb_page->mapping = (void *)obj;

				atomic_inc(&osd->od_zerocopy_alloc);
				lprocfs_counter_add(osd->od_stats,
						    LPROC_OSD_COPY_IO, 1);

				sz_in_block -= plen;
				len -= plen;
				off += plen;
				i++;
				npages++;
			}
		}
	}

	RETURN(npages);

out_err:
	osd_bufs_put(env, &obj->oo_dt, lnb, npages);
	RETURN(rc);
}
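
/* Entry point for dbo_bufs_get: dispatch to the read or write variant
 * depending on the direction of the upcoming bulk transfer. */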
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
			loff_t offset, ssize_t len, struct niobuf_local *lnb,
			int rw)
{
	struct osd_object *obj = osd_dt_obj(dt);
	int                rc;

	LASSERT(dt_object_exists(dt));
	LASSERT(obj->oo_db);

	if (rw == 0)
		rc = osd_bufs_get_read(env, obj, offset, len, lnb);
	else
		rc = osd_bufs_get_write(env, obj, offset, len, lnb);

	return rc;
}
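
/* Nothing to do before a write on ZFS: the buffers were fully set up in
 * osd_bufs_get_write(). */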
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
			  struct niobuf_local *lnb, int npages)
{
	struct osd_object *obj = osd_dt_obj(dt);

	LASSERT(dt_object_exists(dt));
	LASSERT(obj->oo_db);

	return 0;
}
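
/* Return the current block size of the object backing the dbuf. */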
static inline uint32_t osd_get_blocksz(struct osd_object *obj)
{
	uint32_t     blksz;
	u_longlong_t unused;

	LASSERT(obj->oo_db);

	dmu_object_size_from_db(obj->oo_db, &blksz, &unused);
	return blksz;
}
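
/* Round a byte count, adjusted by its offset within the first block, up
 * to a whole number of blocks; used to estimate space for quota. */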
static inline uint64_t osd_roundup2blocksz(uint64_t size,
					   uint64_t offset,
					   uint32_t blksz)
{
	LASSERT(blksz > 0);

	size += offset % blksz;

	if (likely(IS_PO2(blksz)))
		return PO2_ROUNDUP_TYPED(size, blksz, uint64_t);

	size += blksz - 1;
	do_div(size, blksz);
	return size * blksz;
}
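
/* Declare the transaction holds and the quota reservation for a bulk
 * write: contiguous lnbs are merged into extents before being declared
 * to reduce the number of dmu_tx_hold_write() calls. */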
static int osd_declare_write_commit(const struct lu_env *env,
				    struct dt_object *dt,
				    struct niobuf_local *lnb, int npages,
				    struct thandle *th)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            offset = 0;
	uint32_t            size = 0;
	uint32_t            blksz = osd_get_blocksz(obj);
	int                 i, rc, flags = 0;
	bool                ignore_quota = false, synced = false;
	long long           space = 0;
	struct page        *last_page = NULL;
	unsigned long       discont_pages = 0;
	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(obj->oo_db);

	LASSERT(lnb);
	LASSERT(npages > 0);

	oh = container_of0(th, struct osd_thandle, ot_super);

	for (i = 0; i < npages; i++) {
		if (last_page && lnb[i].lnb_page->index != (last_page->index + 1))
			++discont_pages;
		last_page = lnb[i].lnb_page;
		if (lnb[i].lnb_rc)
			/* ENOSPC, network RPC error, etc.
			 * We don't want to book space for pages which will be
			 * skipped in osd_write_commit(). Hence we skip pages
			 * with lnb_rc != 0 here too */
			continue;
		/* ignore quota for the whole request if any page is from
		 * client cache or written by root.
		 *
		 * XXX once we drop the 1.8 client support, the checking
		 * for whether page is from cache can be simplified as:
		 * !(lnb[i].flags & OBD_BRW_SYNC)
		 *
		 * XXX we could handle this on per-lnb basis as done by
		 * grant. */
		if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
		    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
		    OBD_BRW_FROM_GRANT)
			ignore_quota = true;
		if (size == 0) {
			/* first valid lnb */
			offset = lnb[i].lnb_file_offset;
			size = lnb[i].lnb_len;
			continue;
		}
		if (offset + size == lnb[i].lnb_file_offset) {
			/* this lnb is contiguous to the previous one */
			size += lnb[i].lnb_len;
			continue;
		}

		dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
				  offset, size);
		/* Estimating space to be consumed by a write is rather
		 * complicated with ZFS. As a consequence, we don't account for
		 * indirect blocks and just use as a rough estimate the worst
		 * case where the old space is being held by a snapshot. Quota
		 * overrun will be adjusted once the operation is committed, if
		 * required. */
		space += osd_roundup2blocksz(size, offset, blksz);

		offset = lnb[i].lnb_file_offset;
		size = lnb[i].lnb_len;
	}

	if (size) {
		dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
				  offset, size);
		space += osd_roundup2blocksz(size, offset, blksz);
	}

	dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

	oh->ot_write_commit = 1; /* used in osd_trans_start() for fail_loc */

	/* backend zfs filesystem might be configured to store multiple data
	 * copies */
	space *= osd->od_os->os_copies;
	space  = toqb(space);
	CDEBUG(D_QUOTA, "writing %d pages, reserving "LPD64"K of quota space\n",
	       npages, space);

	record_start_io(osd, WRITE, discont_pages);

retry:
	/* acquire quota space if needed */
	rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
			       obj->oo_attr.la_gid, space, oh, true, &flags,
			       ignore_quota);

	if (!synced && rc == -EDQUOT && (flags & QUOTA_FL_SYNC) != 0) {
		dt_sync(env, th->th_dev);
		synced = true;
		CDEBUG(D_QUOTA, "retry after sync\n");
		flags = 0;
		goto retry;
	}

	/* we need only to store the overquota flags in the first lnb for
	 * now, once we support multiple objects BRW, this code needs be
	 * revised. */
	if (flags & QUOTA_FL_OVER_USRQUOTA)
		lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
	if (flags & QUOTA_FL_OVER_GRPQUOTA)
		lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;

	RETURN(rc);
}
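
/* Apply the prepared buffers to the object within the transaction: copy
 * pages land via dmu_write(), loaned ARC buffers are assigned with
 * dmu_assign_arcbuf(), and the size attribute is refreshed if extended. */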
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
			    struct niobuf_local *lnb, int npages,
			    struct thandle *th)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            new_size = 0;
	int                 i, rc = 0;
	unsigned long       iosize = 0;
	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(obj->oo_db);

	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	for (i = 0; i < npages; i++) {
		CDEBUG(D_INODE, "write %u bytes at %u\n",
		       (unsigned) lnb[i].lnb_len,
		       (unsigned) lnb[i].lnb_file_offset);

		if (lnb[i].lnb_rc) {
			/* ENOSPC, network RPC error, etc.
			 * Unlike ldiskfs, zfs allocates new blocks on rewrite,
			 * so we skip this page if lnb_rc is set to -ENOSPC */
			CDEBUG(D_INODE, "obj "DFID": skipping lnb[%u]: rc=%d\n",
			       PFID(lu_object_fid(&dt->do_lu)), i,
			       lnb[i].lnb_rc);
			continue;
		}

		if (lnb[i].lnb_page->mapping == (void *)obj) {
			dmu_write(osd->od_os, obj->oo_db->db_object,
				  lnb[i].lnb_file_offset, lnb[i].lnb_len,
				  kmap(lnb[i].lnb_page), oh->ot_tx);
			kunmap(lnb[i].lnb_page);
		} else if (lnb[i].lnb_data) {
			LASSERT(((unsigned long)lnb[i].lnb_data & 1) == 0);
			/* buffer loaned for zerocopy, try to use it.
			 * notice that dmu_assign_arcbuf() is smart
			 * enough to recognize changed blocksize,
			 * in which case it falls back to dmu_write() */
			dmu_assign_arcbuf(obj->oo_db, lnb[i].lnb_file_offset,
					  lnb[i].lnb_data, oh->ot_tx);
			/* drop the reference, otherwise osd_put_bufs()
			 * will be releasing it - bad! */
			lnb[i].lnb_data = NULL;
			atomic_dec(&osd->od_zerocopy_loan);
		}

		if (new_size < lnb[i].lnb_file_offset + lnb[i].lnb_len)
			new_size = lnb[i].lnb_file_offset + lnb[i].lnb_len;
		iosize += lnb[i].lnb_len;
	}

	if (unlikely(new_size == 0)) {
		/* no pages to write, no transno is needed */
		th->th_local = 1;
		/* it is important to return 0 even when all lnb_rc == -ENOSPC
		 * since ofd_commitrw_write() retries several times on ENOSPC */
		record_end_io(osd, WRITE, 0, 0, 0);
		RETURN(0);
	}

	write_lock(&obj->oo_attr_lock);
	if (obj->oo_attr.la_size < new_size) {
		obj->oo_attr.la_size = new_size;
		write_unlock(&obj->oo_attr_lock);
		/* osd_object_sa_update() will be copying directly from
		 * oo_attr into dbuf. any update within a single txg will copy
		 * the most recent value */
		rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
					  &obj->oo_attr.la_size, 8, oh);
	} else {
		write_unlock(&obj->oo_attr_lock);
	}

	record_end_io(osd, WRITE, 0, iosize, npages);

	RETURN(rc);
}
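
/* Post-read hook: convert per-page state into the return codes the upper
 * layer expects, clipping lnb_rc at the current EOF. */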
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
			 struct niobuf_local *lnb, int npages)
{
	struct osd_object *obj = osd_dt_obj(dt);
	int                i;
	loff_t             eof;

	LASSERT(dt_object_exists(dt));
	LASSERT(obj->oo_db);

	read_lock(&obj->oo_attr_lock);
	eof = obj->oo_attr.la_size;
	read_unlock(&obj->oo_attr_lock);

	for (i = 0; i < npages; i++) {
		if (unlikely(lnb[i].lnb_rc < 0))
			continue;

		lnb[i].lnb_rc = lnb[i].lnb_len;

		if (lnb[i].lnb_file_offset + lnb[i].lnb_len >= eof) {
			if (eof <= lnb[i].lnb_file_offset)
				lnb[i].lnb_rc = 0;
			else
				lnb[i].lnb_rc = eof - lnb[i].lnb_file_offset;

			/* all subsequent rc should be 0 */
			while (++i < npages)
				lnb[i].lnb_rc = 0;
			break;
		}
	}

	return 0;
}
/*
 * Punch/truncate an object
 *
 *	IN:	db  - dmu_buf of the object to free data in.
 *		off - start of section to free.
 *		len - length of section to free (DMU_OBJECT_END => to EOF).
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_sa() and if off < size, dmu_tx_hold_free()
 * called and then assigned to a transaction group.
 */
static int __osd_object_punch(objset_t *os, dmu_buf_t *db, dmu_tx_t *tx,
			      uint64_t size, uint64_t off, uint64_t len)
{
	int rc = 0;

	/* Assert that the transaction has been assigned to a
	   transaction group. */
	LASSERT(tx->tx_txg != 0);
	/*
	 * Nothing to do if file already at desired length.
	 */
	if (len == DMU_OBJECT_END && size == off)
		return 0;

	if (off < size)
		rc = -dmu_free_range(os, db->db_object, off, len, tx);

	return rc;
}
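
/* dbo_punch implementation: free the range within the transaction and,
 * when truncating to EOF, shrink the cached and stored size attribute. */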
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
		     __u64 start, __u64 end, struct thandle *th)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	__u64               len;
	int                 rc = 0;
	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));

	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	write_lock(&obj->oo_attr_lock);
	/* truncate */
	if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
		len = DMU_OBJECT_END;
	else
		len = end - start;
	write_unlock(&obj->oo_attr_lock);

	rc = __osd_object_punch(osd->od_os, obj->oo_db, oh->ot_tx,
				obj->oo_attr.la_size, start, len);
	/* set new size */
	if (len == DMU_OBJECT_END) {
		write_lock(&obj->oo_attr_lock);
		obj->oo_attr.la_size = start;
		write_unlock(&obj->oo_attr_lock);
		rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
					  &obj->oo_attr.la_size, 8, oh);
	}

	RETURN(rc);
}
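
/* Declare the holds needed by osd_punch(): a free of the affected range
 * (only when blocks are actually released), an SA update for the size,
 * and a quota reservation with zero space, since punch only frees. */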
static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
			     __u64 start, __u64 end, struct thandle *handle)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	__u64               len;
	ENTRY;

	oh = container_of0(handle, struct osd_thandle, ot_super);

	read_lock(&obj->oo_attr_lock);
	if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
		len = DMU_OBJECT_END;
	else
		len = end - start;

	/* declare we'll free some blocks ... */
	if (start < obj->oo_attr.la_size) {
		read_unlock(&obj->oo_attr_lock);
		dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object, start, len);
	} else {
		read_unlock(&obj->oo_attr_lock);
	}

	/* ... and we'll modify size attribute */
	dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

	RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
				 obj->oo_attr.la_gid, 0, oh, true, NULL,
				 false));
}
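
/* No ladvise-style hints are implemented for osd-zfs yet; any advice is
 * rejected with -ENOTSUPP. */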
static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
		       __u64 start, __u64 end, enum lu_ladvise_type advice)
{
	int rc;
	ENTRY;

	switch (advice) {
	default:
		rc = -ENOTSUPP;
		break;
	}

	RETURN(rc);
}
struct dt_body_operations osd_body_ops = {
	.dbo_read			= osd_read,
	.dbo_declare_write		= osd_declare_write,
	.dbo_write			= osd_write,
	.dbo_bufs_get			= osd_bufs_get,
	.dbo_bufs_put			= osd_bufs_put,
	.dbo_write_prep			= osd_write_prep,
	.dbo_declare_write_commit	= osd_declare_write_commit,
	.dbo_write_commit		= osd_write_commit,
	.dbo_read_prep			= osd_read_prep,
	.dbo_declare_punch		= osd_declare_punch,
	.dbo_punch			= osd_punch,
	.dbo_ladvise			= osd_ladvise,
};