/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_io.c
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_OSD

#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lustre_quota.h>

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
#include <sys/txg.h>

static char osd_0copy_tag[] = "zerocopy";

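/*
 * Note: dbuf_set_pending_evict() reaches into the DMU buffer implementation
 * (dmu_buf_impl_t) to request that the dbuf be evicted from the ARC once its
 * last hold is dropped, so bulk I/O does not displace more useful cached
 * data. It relies on the db_pending_evict field of the private dbuf
 * structure rather than on a public DMU interface.
 */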
static void dbuf_set_pending_evict(dmu_buf_t *db)
{
        dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;

        dbi->db_pending_evict = TRUE;
}

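/*
 * The two helpers below feed the per-device "brw_stats" histograms. A note
 * on the indexing used in record_end_io(): each BRW_W_* histogram entry
 * immediately follows its BRW_R_* counterpart in the obd_histogram array,
 * so with the kernel's READ == 0 / WRITE == 1 an expression such as
 * h[BRW_R_PAGES + rw] selects the read histogram for reads and the write
 * histogram for writes.
 */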
static void record_start_io(struct osd_device *osd, int rw, int discont_pages)
{
        struct obd_histogram *h = osd->od_brw_stats.hist;

        if (rw == READ) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally(&h[BRW_R_DISCONT_PAGES], discont_pages);
        } else {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally(&h[BRW_W_DISCONT_PAGES], discont_pages);
        }
}

static void record_end_io(struct osd_device *osd, int rw,
                          unsigned long elapsed, int disksize, int npages)
{
        struct obd_histogram *h = osd->od_brw_stats.hist;

        if (rw == READ)
                atomic_dec(&osd->od_r_in_flight);
        else
                atomic_dec(&osd->od_w_in_flight);

        lprocfs_oh_tally_log2(&h[BRW_R_PAGES + rw], npages);
        if (disksize > 0)
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE + rw], disksize);
        if (elapsed)
                lprocfs_oh_tally_log2(&h[BRW_R_IO_TIME + rw], elapsed);
}

static ssize_t __osd_read(const struct lu_env *env, struct dt_object *dt,
                          struct lu_buf *buf, loff_t *pos, size_t *size)
{
        struct osd_object *obj = osd_dt_obj(dt);
        uint64_t old_size;
        int rc;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_dn);

        read_lock(&obj->oo_attr_lock);
        old_size = obj->oo_attr.la_size;
        read_unlock(&obj->oo_attr_lock);

        if (*pos + *size > old_size) {
                if (old_size < *pos)
                        return 0;

                *size = old_size - *pos;
        }

        rc = osd_dmu_read(osd_obj2dev(obj), obj->oo_dn, *pos, *size,
                          buf->lb_buf, DMU_READ_PREFETCH);
        if (rc == 0) {
                rc = *size;
                *pos += *size;
        }

        return rc;
}

static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
                        struct lu_buf *buf, loff_t *pos)
{
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        size_t size = buf->lb_len;
        hrtime_t start = gethrtime();
        s64 delta_ms;
        int rc;

        record_start_io(osd, READ, 0);
        rc = __osd_read(env, dt, buf, pos, &size);
        delta_ms = gethrtime() - start;
        do_div(delta_ms, NSEC_PER_MSEC);
        record_end_io(osd, READ, delta_ms, size, size >> PAGE_SHIFT);

        return rc;
}

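/*
 * osd_read_no_record() is the same read path without the brw_stats
 * bookkeeping; it backs osd_body_scrub_ops at the bottom of this file, so
 * internal reads (e.g. by the OI scrubber) do not skew the per-device I/O
 * histograms.
 */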
static inline ssize_t osd_read_no_record(const struct lu_env *env,
                                         struct dt_object *dt,
                                         struct lu_buf *buf, loff_t *pos)
{
        size_t size = buf->lb_len;

        return __osd_read(env, dt, buf, pos, &size);
}

static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
                                 const struct lu_buf *buf, loff_t pos,
                                 struct thandle *th)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t oid;
        ENTRY;

        oh = container_of(th, struct osd_thandle, ot_super);

        /* in some cases declare can race with creation (e.g. llog)
         * and we need to wait till object is initialized. notice
         * LOHA_EXISTS is supposed to be the last step in the
         * initialization */

        /* size change (in dnode) will be declared by dmu_tx_hold_write() */
        if (dt_object_exists(dt))
                oid = obj->oo_dn->dn_object;
        else
                oid = DMU_NEW_OBJECT;

        /* XXX: we still lack append declaration support in ZFS.
         * pos == -1 means append, which is used mostly by llog;
         * an llog can grow up to LLOG_MIN_CHUNK_SIZE * 8 records */
        if (pos == -1)
                pos = max_t(loff_t, 256 * 8 * LLOG_MIN_CHUNK_SIZE,
                            obj->oo_attr.la_size + (2 << 20));
        osd_tx_hold_write(oh->ot_tx, oid, obj->oo_dn, pos, buf->lb_len);

        /* dt_declare_write() is usually called for system objects, such
         * as llog or last_rcvd files. We needn't enforce quota on those
         * objects, so always set the lqi_space as 0. */
        RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                                 obj->oo_attr.la_gid, obj->oo_attr.la_projid,
                                 0, oh, NULL, OSD_QID_BLK));
}

static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
                         const struct lu_buf *buf, loff_t *pos,
                         struct thandle *th)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t offset = *pos;
        int rc;

        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_dn);

        LASSERT(th != NULL);
        oh = container_of(th, struct osd_thandle, ot_super);

        osd_dmu_write(osd, obj->oo_dn, offset, (uint64_t)buf->lb_len,
                      buf->lb_buf, oh->ot_tx);
        write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < offset + buf->lb_len) {
                obj->oo_attr.la_size = offset + buf->lb_len;
                write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() will be copying directly from oo_attr
                 * into dbuf. any update within a single txg will copy the
                 * most recent value */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
                                          &obj->oo_attr.la_size, 8, oh);
                if (unlikely(rc))
                        GOTO(out, rc);
        } else {
                write_unlock(&obj->oo_attr_lock);
        }

        *pos += buf->lb_len;
        rc = buf->lb_len;

out:
        RETURN(rc);
}

/*
 * XXX: for the moment I don't want to use lnb_flags for osd-internal
 *      purposes as it's not very well defined ...
 *      instead I use the lowest bit of the address so that:
 *      arc buffer:  .lnb_data = abuf          (arc we loan for write)
 *      dbuf buffer: .lnb_data = dbuf | 1      (dbuf we get for read)
 *      copy buffer: .lnb_page->mapping = obj  (page we allocate for write)
 */

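/*
 * For illustration, the decoding side of this convention (implemented in
 * osd_bufs_put() below) looks like:
 *
 *      ptr = (unsigned long)lnb->lnb_data;
 *      if (ptr & 1)                    -- dbuf held for read
 *              dmu_buf_rele((void *)(ptr & ~1UL), osd_0copy_tag);
 *      else if (lnb->lnb_data)         -- arcbuf loaned for write
 *              dmu_return_arcbuf(lnb->lnb_data);
 *
 * The tag bit is available because dbuf pointers are always at least
 * 2-byte aligned, so the lowest address bit is never set for a real
 * pointer.
 */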
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        unsigned long ptr;
        int i;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_dn);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_page == NULL)
                        continue;
                if (lnb[i].lnb_page->mapping == (void *)obj) {
                        /* this is anonymous page allocated for copy-write */
                        lnb[i].lnb_page->mapping = NULL;
                        __free_page(lnb[i].lnb_page);
                        atomic_dec(&osd->od_zerocopy_alloc);
                } else {
                        /* see comment in osd_bufs_get_read() */
                        ptr = (unsigned long)lnb[i].lnb_data;
                        if (ptr & 1UL) {
                                ptr &= ~1UL;
                                dmu_buf_rele((void *)ptr, osd_0copy_tag);
                                atomic_dec(&osd->od_zerocopy_pin);
                        } else if (lnb[i].lnb_data != NULL) {
                                int j, apages, abufsz;

                                abufsz = arc_buf_size(lnb[i].lnb_data);
                                apages = abufsz >> PAGE_SHIFT;
                                /* these references to pages must be
                                 * invalidated so they are not touched again
                                 * after the arcbuf is returned */
                                for (j = 0; j < apages; j++)
                                        lnb[i + j].lnb_page = NULL;
                                dmu_return_arcbuf(lnb[i].lnb_data);
                                atomic_dec(&osd->od_zerocopy_loan);
                        }
                }
                lnb[i].lnb_page = NULL;
                lnb[i].lnb_data = NULL;
        }

        return 0;
}

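/*
 * kmem_to_page() resolves a kernel virtual address to its struct page,
 * handling both vmalloc'ed and linearly-mapped memory. The LASSERT enforces
 * page alignment, which callers guarantee by masking the offset they add
 * (e.g. "bufoff & PAGE_MASK" in osd_bufs_get_read()).
 */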
static inline struct page *kmem_to_page(void *addr)
{
        LASSERT(!((unsigned long)addr & ~PAGE_MASK));
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        else
                return virt_to_page(addr);
}

/**
 * Prepare buffers for read.
 *
 * The function maps the range described by \a off and \a len to \a lnb array.
 * dmu_buf_hold_array_by_bonus() finds/creates appropriate ARC buffers, then
 * we fill \a lnb array with the pages storing ARC buffers. Notice the current
 * implementation passes TRUE to dmu_buf_hold_array_by_bonus() to fill ARC
 * buffers with actual data, so I/O is done in the context of
 * osd_bufs_get_read(). A better implementation would just return the buffers
 * (potentially unfilled) and subsequent osd_read_prep() would do I/O for
 * many ranges concurrently.
 *
 * \param[in] env       environment
 * \param[in] obj       object
 * \param[in] off       offset in bytes
 * \param[in] len       the number of bytes to access
 * \param[out] lnb      array of local niobufs pointing to the buffers with data
 * \param[in] maxlnb    maximum number of entries \a lnb can hold
 *
 * \retval              number of niobufs filled on success
 * \retval              negative error number on failure
 */
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
                             loff_t off, ssize_t len, struct niobuf_local *lnb,
                             int maxlnb)
{
        struct osd_device *osd = osd_obj2dev(obj);
        int rc, i, numbufs, npages = 0, drop_cache = 0;
        hrtime_t start = gethrtime();
        dmu_buf_t **dbp = NULL;
        s64 delta_ms;
        ENTRY;

        record_start_io(osd, READ, 0);

        if (obj->oo_attr.la_size >= osd->od_readcache_max_filesize)
                drop_cache = 1;

        /* grab buffers for read:
         * OSD API lets us grab buffers first, then initiate IO(s)
         * so that all required IOs will be done in parallel, but at the
         * moment DMU doesn't provide us with a method to grab buffers.
         * If we discover this is vital for good performance we
         * can get our own replacement for dmu_buf_hold_array_by_bonus().
         */
        while (len > 0 &&
               (obj->oo_dn->dn_datablkshift != 0 ||
                off < obj->oo_dn->dn_datablksz)) {
                if (obj->oo_dn->dn_datablkshift == 0 &&
                    off + len > obj->oo_dn->dn_datablksz)
                        len = obj->oo_dn->dn_datablksz - off;

                if (unlikely(npages >= maxlnb))
                        GOTO(err, rc = -EOVERFLOW);

                rc = -dmu_buf_hold_array_by_bonus(&obj->oo_dn->dn_bonus->db,
                                                  off, len, TRUE, osd_0copy_tag,
                                                  &numbufs, &dbp);
                if (unlikely(rc))
                        GOTO(err, rc);

                for (i = 0; i < numbufs; i++) {
                        int bufoff, tocpy, thispage;
                        void *dbf;

                        LASSERT(len > 0);

                        atomic_inc(&osd->od_zerocopy_pin);

                        bufoff = off - dbp[i]->db_offset;
                        tocpy = min_t(int, dbp[i]->db_size - bufoff, len);

                        /* kind of trick to differentiate dbuf vs. arcbuf */
                        LASSERT(((unsigned long)dbp[i] & 1) == 0);
                        dbf = (void *)((unsigned long)dbp[i] | 1);

                        while (tocpy > 0) {
                                if (unlikely(npages >= maxlnb))
                                        GOTO(err, rc = -EOVERFLOW);

                                thispage = PAGE_SIZE;
                                thispage -= bufoff & (PAGE_SIZE - 1);
                                thispage = min(tocpy, thispage);

                                lnb->lnb_rc = 0;
                                lnb->lnb_file_offset = off;
                                lnb->lnb_page_offset = bufoff & ~PAGE_MASK;
                                lnb->lnb_len = thispage;
                                lnb->lnb_page = kmem_to_page(dbp[i]->db_data +
                                                        (bufoff & PAGE_MASK));
                                /* mark just a single slot: we need this
                                 * reference to dbuf to be released once */
                                lnb->lnb_data = dbf;
                                dbf = NULL;

                                tocpy -= thispage;
                                len -= thispage;
                                bufoff += thispage;
                                off += thispage;

                                npages++;
                                lnb++;
                        }

                        if (drop_cache)
                                dbuf_set_pending_evict(dbp[i]);

                        /* steal dbuf so dmu_buf_rele_array() can't release
                         * it */
                        dbp[i] = NULL;
                }

                dmu_buf_rele_array(dbp, numbufs, osd_0copy_tag);
                dbp = NULL;
        }

        delta_ms = gethrtime() - start;
        do_div(delta_ms, NSEC_PER_MSEC);
        record_end_io(osd, READ, delta_ms, npages * PAGE_SIZE, npages);

        RETURN(npages);

err:
        LASSERT(rc < 0);
        if (dbp)
                dmu_buf_rele_array(dbp, numbufs, osd_0copy_tag);
        osd_bufs_put(env, &obj->oo_dt, lnb - npages, npages);
        RETURN(rc);
}

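/*
 * osd_request_arcbuf() wraps dmu_request_arcbuf() with two distinct failure
 * modes that callers must treat differently: ERR_PTR(-ENOMEM) is fatal,
 * while a NULL return (possible only with the pre-0.7 alignment workaround
 * below) means "arcbuf unusable for RDMA" and the caller quietly falls
 * back to copy buffers.
 */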
static inline arc_buf_t *osd_request_arcbuf(dnode_t *dn, size_t bs)
{
        arc_buf_t *abuf;

        abuf = dmu_request_arcbuf(&dn->dn_bonus->db, bs);
        if (unlikely(!abuf))
                return ERR_PTR(-ENOMEM);

#if ZFS_VERSION_CODE < OBD_OCD_VERSION(0, 7, 0, 0)
        /**
         * ZFS prior to 0.7.0 doesn't guarantee PAGE_SIZE alignment for zio
         * blocks smaller than (PAGE_SIZE << 2). This poses a problem of
         * setting up page array for RDMA transfer. See LU-9305.
         */
        if ((unsigned long)abuf->b_data & ~PAGE_MASK) {
                dmu_return_arcbuf(abuf);
                return NULL;
        }
#endif

        return abuf;
}

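/*
 * Write buffers come in two flavours, chosen per block. For example, with a
 * 128KB block size, a 300KB write at offset 0 is served as two loaned
 * arcbufs covering the two full 128KB blocks (zero-copy) plus eleven
 * allocated copy pages for the trailing 44KB; only full-block writes can
 * safely go zero-copy since nobody else can be updating the same block.
 */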
static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
                              loff_t off, ssize_t len, struct niobuf_local *lnb,
                              int maxlnb)
{
        struct osd_device *osd = osd_obj2dev(obj);
        int poff, plen, off_in_block, sz_in_block;
        int rc, i = 0, npages = 0;
        dnode_t *dn = obj->oo_dn;
        arc_buf_t *abuf;
        uint32_t bs = dn->dn_datablksz;
        ENTRY;

        /*
         * currently only full blocks are subject to zerocopy approach:
         * so that we're sure nobody is trying to update the same block
         */
        while (len > 0) {
                if (unlikely(npages >= maxlnb))
                        GOTO(out_err, rc = -EOVERFLOW);

                off_in_block = off & (bs - 1);
                sz_in_block = min_t(int, bs - off_in_block, len);

                abuf = NULL;
                if (sz_in_block == bs) {
                        /* full block, try to use zerocopy */
                        abuf = osd_request_arcbuf(dn, bs);
                        if (unlikely(IS_ERR(abuf)))
                                GOTO(out_err, rc = PTR_ERR(abuf));
                }

                if (abuf != NULL) {
                        atomic_inc(&osd->od_zerocopy_loan);

                        /* go over pages arcbuf contains, put them as
                         * local niobufs for ptlrpc's bulks */
                        while (sz_in_block > 0) {
                                plen = min_t(int, sz_in_block, PAGE_SIZE);

                                if (unlikely(npages >= maxlnb))
                                        GOTO(out_err, rc = -EOVERFLOW);

                                lnb[i].lnb_file_offset = off;
                                lnb[i].lnb_page_offset = 0;
                                lnb[i].lnb_len = plen;
                                lnb[i].lnb_rc = 0;
                                if (sz_in_block == bs)
                                        lnb[i].lnb_data = abuf;
                                else
                                        lnb[i].lnb_data = NULL;

                                /* this one is not supposed to fail */
                                lnb[i].lnb_page = kmem_to_page(abuf->b_data +
                                                               off_in_block);
                                LASSERT(lnb[i].lnb_page);

                                lprocfs_counter_add(osd->od_stats,
                                                    LPROC_OSD_ZEROCOPY_IO, 1);

                                sz_in_block -= plen;
                                len -= plen;
                                off += plen;
                                off_in_block += plen;
                                i++;
                                npages++;
                        }
                } else {
                        if (off_in_block == 0 && len < bs &&
                            off + len >= obj->oo_attr.la_size)
                                lprocfs_counter_add(osd->od_stats,
                                                    LPROC_OSD_TAIL_IO, 1);

                        /* can't use zerocopy, allocate temp. buffers */
                        poff = off & (PAGE_SIZE - 1);
                        while (sz_in_block > 0) {
                                plen = min_t(int, poff + sz_in_block,
                                             PAGE_SIZE) - poff;

                                if (unlikely(npages >= maxlnb))
                                        GOTO(out_err, rc = -EOVERFLOW);

                                lnb[i].lnb_file_offset = off;
                                lnb[i].lnb_page_offset = poff;
                                poff = 0;

                                lnb[i].lnb_len = plen;
                                lnb[i].lnb_rc = 0;
                                lnb[i].lnb_data = NULL;

                                lnb[i].lnb_page = alloc_page(OSD_GFP_IO);
                                if (unlikely(lnb[i].lnb_page == NULL))
                                        GOTO(out_err, rc = -ENOMEM);

                                LASSERT(lnb[i].lnb_page->mapping == NULL);
                                lnb[i].lnb_page->mapping = (void *)obj;

                                atomic_inc(&osd->od_zerocopy_alloc);
                                lprocfs_counter_add(osd->od_stats,
                                                    LPROC_OSD_COPY_IO, 1);

                                sz_in_block -= plen;
                                len -= plen;
                                off += plen;
                                i++;
                                npages++;
                        }
                }
        }

        RETURN(npages);

out_err:
        osd_bufs_put(env, &obj->oo_dt, lnb, npages);
        RETURN(rc);
}

static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t offset, ssize_t len, struct niobuf_local *lnb,
                        int maxlnb, enum dt_bufs_type rw)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int rc;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_dn);

        if (rw & DT_BUFS_TYPE_WRITE)
                rc = osd_bufs_get_write(env, obj, offset, len, lnb, maxlnb);
        else
                rc = osd_bufs_get_read(env, obj, offset, len, lnb, maxlnb);

        return rc;
}

static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_dn);

        return 0;
}

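/*
 * osd_roundup2blocksz() estimates how many bytes a write of "size" bytes
 * starting at "offset" can consume on disk, by rounding the range up to
 * whole filesystem blocks. For illustration, with blksz = 128KB, a 4KB
 * write at offset 130KB first grows to 6KB (4KB plus the 2KB offset into
 * its block) and then rounds up to one full 128KB block.
 */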
static inline uint64_t osd_roundup2blocksz(uint64_t size,
                                           uint64_t offset,
                                           uint32_t blksz)
{
        LASSERT(blksz > 0);

        size += offset % blksz;

        if (likely(is_power_of_2(blksz)))
                return round_up(size, blksz);

        return DIV_ROUND_UP_ULL(size, blksz) * blksz;
}

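/*
 * osd_declare_write_commit() books transaction and quota space for a whole
 * batch of pages. Contiguous niobufs are coalesced into a single
 * osd_tx_hold_write() extent first: e.g. three 4KB pages at offsets 0, 4KB
 * and 8KB produce one 12KB hold rather than three 4KB ones. The quota
 * estimate is deliberately pessimistic (whole blocks, all data copies, old
 * space possibly pinned by a snapshot) and is trued up at commit time.
 */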
static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *th)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t offset = 0;
        uint32_t size = 0;
        uint32_t blksz = obj->oo_dn->dn_datablksz;
        int i, rc;
        bool synced = false;
        long long space = 0;
        struct page *last_page = NULL;
        unsigned long discont_pages = 0;
        enum osd_quota_local_flags local_flags = 0;
        enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_dn);

        LASSERT(lnb);
        LASSERT(npages > 0);

        oh = container_of(th, struct osd_thandle, ot_super);

        for (i = 0; i < npages; i++) {
                if (last_page && lnb[i].lnb_page->index != (last_page->index + 1))
                        ++discont_pages;
                last_page = lnb[i].lnb_page;
                if (lnb[i].lnb_rc)
                        /* ENOSPC, network RPC error, etc.
                         * We don't want to book space for pages which will be
                         * skipped in osd_write_commit(). Hence we skip pages
                         * with lnb_rc != 0 here too */
                        continue;
                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the checking
                 * for whether page is from cache can be simplified as:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        declare_flags |= OSD_QID_FORCE;

                if (size == 0) {
                        /* first valid lnb */
                        offset = lnb[i].lnb_file_offset;
                        size = lnb[i].lnb_len;
                        continue;
                }
                if (offset + size == lnb[i].lnb_file_offset) {
                        /* this lnb is contiguous to the previous one */
                        size += lnb[i].lnb_len;
                        continue;
                }

                osd_tx_hold_write(oh->ot_tx, obj->oo_dn->dn_object,
                                  obj->oo_dn, offset, size);
                /* Estimating space to be consumed by a write is rather
                 * complicated with ZFS. As a consequence, we don't account for
                 * indirect blocks and just use as a rough estimate the worst
                 * case where the old space is being held by a snapshot. Quota
                 * overrun will be adjusted once the operation is committed, if
                 * required. */
                space += osd_roundup2blocksz(size, offset, blksz);

                offset = lnb[i].lnb_file_offset;
                size = lnb[i].lnb_len;
        }

        if (size) {
                osd_tx_hold_write(oh->ot_tx, obj->oo_dn->dn_object, obj->oo_dn,
                                  offset, size);
                space += osd_roundup2blocksz(size, offset, blksz);
        }

        /* backend zfs filesystem might be configured to store multiple data
         * copies */
        space *= osd->od_os->os_copies;
        space = toqb(space);
        CDEBUG(D_QUOTA, "writing %d pages, reserving %lldK of quota space\n",
               npages, space);

        record_start_io(osd, WRITE, discont_pages);
retry:
        /* acquire quota space if needed */
        rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                               obj->oo_attr.la_gid, obj->oo_attr.la_projid,
                               space, oh, &local_flags, declare_flags);

        if (!synced && rc == -EDQUOT &&
            (local_flags & QUOTA_FL_SYNC) != 0) {
                dt_sync(env, th->th_dev);

                synced = true;
                CDEBUG(D_QUOTA, "retry after sync\n");
                local_flags = 0;
                goto retry;
        }

        /* we need only to store the overquota flags in the first lnb for
         * now, once we support multiple objects BRW, this code needs to be
         * revised. */
        if (local_flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
#ifdef ZFS_PROJINHERIT
        if (local_flags & QUOTA_FL_OVER_PRJQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;
#endif

        RETURN(rc);
}

/**
 * Policy to grow ZFS block size by write pattern.
 * For sequential write, it grows block size gradually until it reaches the
 * maximum blocksize the dataset can support. Otherwise, it will pick a
 * block size by the writing region of this I/O.
 */
static int osd_grow_blocksize(struct osd_object *obj, struct osd_thandle *oh,
                              uint64_t start, uint64_t end)
{
        struct osd_device *osd = osd_obj2dev(obj);
        dnode_t *dn = obj->oo_dn;
        uint32_t blksz;
        int rc = 0;

        ENTRY;

        if (dn->dn_maxblkid > 0) /* can't change block size */
                GOTO(out, rc);

        if (dn->dn_datablksz >= osd->od_max_blksz)
                GOTO(out, rc);

        down_write(&obj->oo_guard);

        blksz = dn->dn_datablksz;
        if (blksz >= osd->od_max_blksz) /* check again after grabbing lock */
                GOTO(out_unlock, rc);

        /* now ZFS can support up to 16MB block size, and if the write
         * is sequential, it just increases the block size gradually */
        if (start <= blksz) {           /* sequential */
                blksz = (uint32_t)min_t(uint64_t, osd->od_max_blksz, end);
        } else {                        /* sparse, pick a block size by write region */
                blksz = (uint32_t)min_t(uint64_t, osd->od_max_blksz,
                                        end - start);
        }

        if (!is_power_of_2(blksz))
                blksz = size_roundup_power2(blksz);

        if (blksz > dn->dn_datablksz) {
                rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
                                               blksz, 0, oh->ot_tx);
                LASSERT(ergo(rc == 0, dn->dn_datablksz >= blksz));
                if (rc < 0)
                        CDEBUG(D_INODE, "object "DFID": change block size "
                               "%u -> %u error rc = %d\n",
                               PFID(lu_object_fid(&obj->oo_dt.do_lu)),
                               dn->dn_datablksz, blksz, rc);
        }
        EXIT;
out_unlock:
        up_write(&obj->oo_guard);
out:
        return rc;
}

static void osd_evict_dbufs_after_write(struct osd_object *obj,
                                        loff_t off, ssize_t len)
{
        dmu_buf_t **dbp;
        int i, rc, numbufs;

        rc = -dmu_buf_hold_array_by_bonus(&obj->oo_dn->dn_bonus->db, off, len,
                                          TRUE, osd_0copy_tag, &numbufs, &dbp);
        if (unlikely(rc))
                return;

        for (i = 0; i < numbufs; i++)
                dbuf_set_pending_evict(dbp[i]);

        dmu_buf_rele_array(dbp, numbufs, osd_0copy_tag);
}

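/*
 * Outline of osd_write_commit() below: the block size is adjusted first
 * (the buffers are assumed sorted by offset), then each niobuf is pushed
 * to ZFS either by copying from its anonymous page (osd_dmu_write()) or by
 * handing a loaned arcbuf over with dmu_assign_arcbuf(); finally the SA
 * size attribute is updated if the file grew. Pages with a non-zero lnb_rc
 * are skipped, and dbufs are marked for eviction when the file is past
 * od_readcache_max_filesize.
 */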
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *th, __u64 user_size)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t new_size = 0;
        int i, abufsz, rc = 0, drop_cache = 0;
        unsigned long iosize = 0;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_dn);

        LASSERT(th != NULL);
        oh = container_of(th, struct osd_thandle, ot_super);

        /* adjust block size. Assume the buffers are sorted. */
        (void)osd_grow_blocksize(obj, oh, lnb[0].lnb_file_offset,
                                 lnb[npages - 1].lnb_file_offset +
                                 lnb[npages - 1].lnb_len);

        if (obj->oo_attr.la_size >= osd->od_readcache_max_filesize ||
            lnb[npages - 1].lnb_file_offset + lnb[npages - 1].lnb_len >=
            osd->od_readcache_max_filesize)
                drop_cache = 1;

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC))
                RETURN(-ENOSPC);

        /* if la_size is already bigger than specified user_size,
         * ignore user_size
         */
        if (obj->oo_attr.la_size > user_size)
                user_size = 0;

        /* LU-8791: take oo_guard to avoid the deadlock that changing block
         * size and assigning arcbuf take place at the same time.
         *
         * Thread 1:
         * osd_write_commit()
         *  -> osd_grow_blocksize() with osd_object::oo_guard held
         *   -> dmu_object_set_blocksize()
         *    -> dnode_set_blksz(), with dnode_t::dn_struct_rwlock
         *       write lock held
         *     -> dbuf_new_size()
         *      -> dmu_buf_will_dirty()
         *       -> dbuf_read()
         *        -> wait for the dbuf state to change
         * Thread 2:
         * osd_write_commit()
         *  -> dmu_assign_arcbuf()
         *   -> dbuf_assign_arcbuf(), set dbuf state to DB_FILL
         *    -> dbuf_dirty()
         *     -> try to hold the read lock of dnode_t::dn_struct_rwlock
         *
         * By taking the read lock, we prevent thread 2 from entering the
         * critical section of assigning the arcbuf while thread 1 is
         * changing the block size.
         */
        down_read(&obj->oo_guard);
        for (i = 0; i < npages; i++) {
                CDEBUG(D_INODE, "write %u bytes at %u\n",
                       (unsigned) lnb[i].lnb_len,
                       (unsigned) lnb[i].lnb_file_offset);

                if (lnb[i].lnb_rc) {
                        /* ENOSPC, network RPC error, etc.
                         * Unlike ldiskfs, zfs allocates new blocks on rewrite,
                         * so we skip this page if lnb_rc is set to -ENOSPC */
                        CDEBUG(D_INODE, "obj "DFID": skipping lnb[%u]: rc=%d\n",
                               PFID(lu_object_fid(&dt->do_lu)), i,
                               lnb[i].lnb_rc);
                        continue;
                }

                if (new_size < lnb[i].lnb_file_offset + lnb[i].lnb_len)
                        new_size = lnb[i].lnb_file_offset + lnb[i].lnb_len;
                if (lnb[i].lnb_page == NULL)
                        continue;

                if (lnb[i].lnb_page->mapping == (void *)obj) {
                        osd_dmu_write(osd, obj->oo_dn, lnb[i].lnb_file_offset,
                                      lnb[i].lnb_len, kmap(lnb[i].lnb_page) +
                                      lnb[i].lnb_page_offset, oh->ot_tx);
                        kunmap(lnb[i].lnb_page);
                        iosize += lnb[i].lnb_len;
                        abufsz = lnb[i].lnb_len; /* to drop cache below */
                } else if (lnb[i].lnb_data) {
                        int j, apages;

                        LASSERT(((unsigned long)lnb[i].lnb_data & 1) == 0);
                        /* buffer loaned for zerocopy, try to use it.
                         * notice that dmu_assign_arcbuf() is smart
                         * enough to recognize changed blocksize,
                         * in which case it falls back to dmu_write() */
                        abufsz = arc_buf_size(lnb[i].lnb_data);
                        LASSERT(abufsz & PAGE_MASK);
                        apages = abufsz >> PAGE_SHIFT;
                        LASSERT(i + apages <= npages);
                        /* these references to pages must be invalidated
                         * to prevent access in osd_bufs_put() */
                        for (j = 0; j < apages; j++)
                                lnb[i + j].lnb_page = NULL;
                        dmu_assign_arcbuf(&obj->oo_dn->dn_bonus->db,
                                          lnb[i].lnb_file_offset,
                                          lnb[i].lnb_data, oh->ot_tx);
                        /* drop the reference, otherwise osd_bufs_put()
                         * will be releasing it - bad! */
                        lnb[i].lnb_data = NULL;
                        atomic_dec(&osd->od_zerocopy_loan);
                        iosize += abufsz;
                } else {
                        /* we don't want to deal with cache if nothing
                         * has been sent to ZFS at this step */
                        continue;
                }

                if (!drop_cache)
                        continue;

                /* we have to mark dbufs for eviction here because
                 * dmu_assign_arcbuf() may create a new dbuf for
                 * loaned abuf */
                osd_evict_dbufs_after_write(obj, lnb[i].lnb_file_offset,
                                            abufsz);
        }
        up_read(&obj->oo_guard);

        if (unlikely(new_size == 0)) {
                /* no pages to write, no transno is needed */
                th->th_local = 1;
                /* it is important to return 0 even when all lnb_rc == -ENOSPC
                 * since ofd_commitrw_write() retries several times on ENOSPC */
                record_end_io(osd, WRITE, 0, 0, 0);
                RETURN(0);
        }

        /* if file has grown, take user_size into account */
        if (user_size && new_size > user_size)
                new_size = user_size;
        write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < new_size) {
                obj->oo_attr.la_size = new_size;
                write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() will be copying directly from
                 * oo_attr into dbuf. any update within a single txg will copy
                 * the most recent value */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
                                          &obj->oo_attr.la_size, 8, oh);
        } else {
                write_unlock(&obj->oo_attr_lock);
        }

        record_end_io(osd, WRITE, 0, iosize, npages);

        RETURN(rc);
}

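/*
 * osd_read_prep() sets per-page return codes after the buffers were filled
 * by osd_bufs_get_read(). Whole pages are always reported, so with
 * la_size = 5000 and three 4KB pages at offsets 0, 4096 and 8192 the first
 * two get lnb_rc = 4096 (the second page straddles EOF but is sent in
 * full) and the third, entirely beyond EOF, gets lnb_rc = 0 along with any
 * pages after it.
 */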
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                         struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int i;
        loff_t eof;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_dn);

        read_lock(&obj->oo_attr_lock);
        eof = obj->oo_attr.la_size;
        read_unlock(&obj->oo_attr_lock);

        for (i = 0; i < npages; i++) {
                if (unlikely(lnb[i].lnb_rc < 0))
                        continue;

                lnb[i].lnb_rc = lnb[i].lnb_len;

                if (lnb[i].lnb_file_offset + lnb[i].lnb_len >= eof) {
                        /* send complete pages all the time */
                        if (eof <= lnb[i].lnb_file_offset)
                                lnb[i].lnb_rc = 0;

                        /* all subsequent rc should be 0 */
                        while (++i < npages)
                                lnb[i].lnb_rc = 0;
                        break;
                }
        }

        return 0;
}

/*
 * Punch/truncate an object
 *
 *      IN:     db    - dmu_buf of the object to free data in.
 *              off   - start of section to free.
 *              len   - length of section to free (DMU_OBJECT_END => to EOF).
 *
 *      RETURN: 0 on success
 *              error code on failure
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_sa() and if off < size, dmu_tx_hold_free()
 * called and then assigned to a transaction group.
 */
static int __osd_object_punch(struct osd_object *obj, objset_t *os,
                              dmu_tx_t *tx, uint64_t off, uint64_t len)
{
        dnode_t *dn = obj->oo_dn;
        uint64_t size = obj->oo_attr.la_size;
        int rc = 0;

        /* Assert that the transaction has been assigned to a
         * transaction group. */
        LASSERT(tx->tx_txg != 0);
        /*
         * Nothing to do if file already at desired length.
         */
        if (len == DMU_OBJECT_END && size == off)
                return 0;

        /* if object holds encrypted content, we need to make sure we truncate
         * on an encryption unit boundary, or subsequent reads will get
         * corrupted content
         */
        if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
            off & ~LUSTRE_ENCRYPTION_MASK) {
                if (len != DMU_OBJECT_END)
                        len -= LUSTRE_ENCRYPTION_UNIT_SIZE -
                               (off & ~LUSTRE_ENCRYPTION_MASK);
                off = (off & LUSTRE_ENCRYPTION_MASK) +
                      LUSTRE_ENCRYPTION_UNIT_SIZE;
        }

        /* XXX: dnode_free_range() can be used to save on dnode lookup */
        if (off < size)
                dmu_free_range(os, dn->dn_object, off, len, tx);

        return rc;
}

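/*
 * A note on the encryption boundary handling above: with
 * LUSTRE_ENCRYPTION_UNIT_SIZE = 4096, punching from off = 6144 rounds the
 * start up to 8192 and shrinks len by 2048, so the end offset of the
 * punched range is preserved while the partially-covered encryption unit
 * at the front is left intact.
 */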
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
                     __u64 start, __u64 end, struct thandle *th)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        __u64 len;
        int rc = 0;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));

        LASSERT(th != NULL);
        oh = container_of(th, struct osd_thandle, ot_super);

        write_lock(&obj->oo_attr_lock);
        /* truncate */
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;
        write_unlock(&obj->oo_attr_lock);

        rc = __osd_object_punch(obj, osd->od_os, oh->ot_tx, start, len);

        /* set new size */
        if (len == DMU_OBJECT_END) {
                write_lock(&obj->oo_attr_lock);
                obj->oo_attr.la_size = start;
                write_unlock(&obj->oo_attr_lock);
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
                                          &obj->oo_attr.la_size, 8, oh);
        }

        RETURN(rc);
}

static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
                             __u64 start, __u64 end, struct thandle *handle)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        __u64 len;
        ENTRY;

        oh = container_of(handle, struct osd_thandle, ot_super);

        read_lock(&obj->oo_attr_lock);
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;

        /* declare we'll free some blocks ... */
        /* if object holds encrypted content, we need to make sure we truncate
         * on an encryption unit boundary, or subsequent reads will get
         * corrupted content
         */
        if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
            start & ~LUSTRE_ENCRYPTION_MASK)
                start = (start & LUSTRE_ENCRYPTION_MASK) +
                        LUSTRE_ENCRYPTION_UNIT_SIZE;
        if (start < obj->oo_attr.la_size) {
                read_unlock(&obj->oo_attr_lock);
                dmu_tx_mark_netfree(oh->ot_tx);
                dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object, start, len);
        } else {
                read_unlock(&obj->oo_attr_lock);
        }

        RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                                 obj->oo_attr.la_gid, obj->oo_attr.la_projid,
                                 0, oh, NULL, OSD_QID_BLK));
}

static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
                       __u64 start, __u64 end, enum lu_ladvise_type advice)
{
        int rc;
        ENTRY;

        switch (advice) {
        default:
                rc = -ENOTSUPP;
                break;
        }

        RETURN(rc);
}

static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
                         __u64 start, __u64 end, int mode, struct thandle *th)
{
        int rc = -EOPNOTSUPP;
        ENTRY;

        /*
         * space preallocation is not supported for ZFS
         * Returns -EOPNOTSUPP for now
         */
        RETURN(rc);
}

static int osd_declare_fallocate(const struct lu_env *env,
                                 struct dt_object *dt, __u64 start, __u64 end,
                                 int mode, struct thandle *th)
{
        int rc = -EOPNOTSUPP;
        ENTRY;

        /*
         * space preallocation is not supported for ZFS
         * Returns -EOPNOTSUPP for now
         */
        RETURN(rc);
}

static loff_t osd_lseek(const struct lu_env *env, struct dt_object *dt,
                        loff_t offset, int whence)
{
        struct osd_object *obj = osd_dt_obj(dt);
        uint64_t size = obj->oo_attr.la_size;
        uint64_t result = offset;
        int rc;
        boolean_t hole = whence == SEEK_HOLE;

        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));
        LASSERT(offset >= 0);

        /* for SEEK_HOLE treat 'offset' beyond the end of file as in real
         * hole. LOV to decide after all if that real hole or not.
         */
        if (offset >= size)
                RETURN(hole ? offset : -ENXIO);

        rc = osd_dmu_offset_next(osd_obj2dev(obj)->od_os,
                                 obj->oo_dn->dn_object, hole, &result);
        if (rc == ESRCH)
                RETURN(-ENXIO);

        /* file was dirty, so fall back to using generic logic:
         * for SEEK_HOLE return file size, for SEEK_DATA the result is set
         * already to the 'offset' parameter value.
         */
        if (rc == EBUSY && hole)
                result = size;

        /* dmu_offset_next() only works on whole blocks so may return SEEK_HOLE
         * result as end of the last block instead of logical EOF, which we
         * need for compatibility with other OSDs, so cap the result at the
         * known file size. */
        if (result > size)
                result = size;

        RETURN(result);
}

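/*
 * The two operation tables below are wired into dt_object::do_body_ops.
 * osd_body_scrub_ops is a reduced set (presumably for the OI scrubber)
 * that only differs in using osd_read_no_record(), keeping internal reads
 * out of the brw_stats histograms.
 */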
const struct dt_body_operations osd_body_ops = {
        .dbo_read                       = osd_read,
        .dbo_declare_write              = osd_declare_write,
        .dbo_write                      = osd_write,
        .dbo_bufs_get                   = osd_bufs_get,
        .dbo_bufs_put                   = osd_bufs_put,
        .dbo_write_prep                 = osd_write_prep,
        .dbo_declare_write_commit       = osd_declare_write_commit,
        .dbo_write_commit               = osd_write_commit,
        .dbo_read_prep                  = osd_read_prep,
        .dbo_declare_punch              = osd_declare_punch,
        .dbo_punch                      = osd_punch,
        .dbo_ladvise                    = osd_ladvise,
        .dbo_declare_fallocate          = osd_declare_fallocate,
        .dbo_fallocate                  = osd_fallocate,
        .dbo_lseek                      = osd_lseek,
};

const struct dt_body_operations osd_body_scrub_ops = {
        .dbo_read                       = osd_read_no_record,
        .dbo_declare_write              = osd_declare_write,
        .dbo_write                      = osd_write,
};