/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_io.c
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_OSD

#include <lustre_ver.h>
#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lustre/lustre_idl.h>  /* LLOG_CHUNK_SIZE definition */

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
#include <sys/txg.h>

static char *osd_zerocopy_tag = "zerocopy";

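/*
 * Bulk I/O statistics helpers: record_start_io() bumps the per-device
 * in-flight counter and tallies the RPC, page-count and discontiguous-page
 * histograms; record_end_io() drops the in-flight counter and tallies the
 * disk I/O size and elapsed time.
 */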
static void record_start_io(struct osd_device *osd, int rw, int npages,
                            int discont_pages)
{
        struct obd_histogram *h = osd->od_brw_stats.hist;

        if (rw == READ) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_PAGES], npages);
                lprocfs_oh_tally(&h[BRW_R_DISCONT_PAGES], discont_pages);

        } else {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_PAGES], npages);
                lprocfs_oh_tally(&h[BRW_W_DISCONT_PAGES], discont_pages);

        }
}

static void record_end_io(struct osd_device *osd, int rw,
                          unsigned long elapsed, int disksize)
{
        struct obd_histogram *h = osd->od_brw_stats.hist;

        if (rw == READ) {
                atomic_dec(&osd->od_r_in_flight);
                if (disksize > 0)
                        lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], disksize);
                if (elapsed)
                        lprocfs_oh_tally_log2(&h[BRW_R_IO_TIME], elapsed);

        } else {
                atomic_dec(&osd->od_w_in_flight);
                if (disksize > 0)
                        lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], disksize);
                if (elapsed)
                        lprocfs_oh_tally_log2(&h[BRW_W_IO_TIME], elapsed);
        }
}

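/*
 * Implementation of dt_body_operations::dbo_read.
 *
 * Read up to buf->lb_len bytes at *pos via dmu_read(), clipping the request
 * at the current object size.  On success returns the number of bytes read
 * and advances *pos.
 */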
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
                        struct lu_buf *buf, loff_t *pos,
                        struct lustre_capa *capa)
{
        struct osd_object *obj  = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        uint64_t           old_size;
        int                size = buf->lb_len;
        int                rc;
        unsigned long      start;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        start = cfs_time_current();

        read_lock(&obj->oo_attr_lock);
        old_size = obj->oo_attr.la_size;
        read_unlock(&obj->oo_attr_lock);

        if (*pos + size > old_size) {
                if (old_size < *pos)
                        return 0;
                else
                        size = old_size - *pos;
        }

        record_start_io(osd, READ, (size >> PAGE_CACHE_SHIFT), 0);

        rc = -dmu_read(osd->od_os, obj->oo_db->db_object, *pos, size,
                        buf->lb_buf, DMU_READ_PREFETCH);

        record_end_io(osd, READ, cfs_time_current() - start, size);
        if (rc == 0) {
                rc = size;
                *pos += size;
        }
        return rc;
}

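/*
 * Implementation of dt_body_operations::dbo_declare_write.
 *
 * Reserve transaction credits for a buffered write: an SA hold for the
 * attribute (size) update and a dmu_tx_hold_write() for the data range.
 */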
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
                                const struct lu_buf *buf, loff_t pos,
                                struct thandle *th)
{
        struct osd_object  *obj  = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t            oid;
        ENTRY;

        oh = container_of0(th, struct osd_thandle, ot_super);

        /* in some cases declare can race with creation (e.g. llog)
         * and we need to wait till the object is initialized. notice
         * LOHA_EXISTS is supposed to be the last step in the
         * initialization */

        /* declare possible size change. notice we can't check
         * current size here as another thread can change it */

        if (dt_object_exists(dt)) {
                LASSERT(obj->oo_db);
                oid = obj->oo_db->db_object;

                dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
        } else {
                oid = DMU_NEW_OBJECT;
                dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
        }

        /* XXX: we still lack append declaration support in ZFS
         *      -1 means append, which is used mostly by llog; an llog
         *      can grow up to LLOG_CHUNK_SIZE*8 records */
        if (pos == -1)
                pos = max_t(loff_t, 256 * 8 * LLOG_CHUNK_SIZE,
                            obj->oo_attr.la_size + (2 << 20));
        dmu_tx_hold_write(oh->ot_tx, oid, pos, buf->lb_len);

        /* dt_declare_write() is usually called for system objects, such
         * as llog or last_rcvd files. We needn't enforce quota on those
         * objects, so always set the lqi_space as 0. */
        RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                                 obj->oo_attr.la_gid, 0, oh, true, NULL,
                                 false));
}

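/*
 * Implementation of dt_body_operations::dbo_write.
 *
 * Copy the buffer into the DMU with dmu_write() within the already started
 * transaction and, if the write extends the object, update the cached size
 * and the SA_ZPL_SIZE attribute.
 */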
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
                        const struct lu_buf *buf, loff_t *pos,
                        struct thandle *th, struct lustre_capa *capa,
                        int ignore_quota)
{
        struct osd_object  *obj  = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t            offset = *pos;
        int                 rc;

        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);

        record_start_io(osd, WRITE, (buf->lb_len >> PAGE_CACHE_SHIFT), 0);

        dmu_write(osd->od_os, obj->oo_db->db_object, offset,
                (uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
        write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < offset + buf->lb_len) {
                obj->oo_attr.la_size = offset + buf->lb_len;
                write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() will be copying directly from oo_attr
                 * into dbuf.  any update within a single txg will copy the
                 * most recent value */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
                                        &obj->oo_attr.la_size, 8, oh);
                if (unlikely(rc))
                        GOTO(out, rc);
        } else {
                write_unlock(&obj->oo_attr_lock);
        }

        *pos += buf->lb_len;
        rc = buf->lb_len;

out:
        record_end_io(osd, WRITE, 0, buf->lb_len);

        RETURN(rc);
}

/*
 * XXX: for the moment I don't want to use lnb_flags for osd-internal
 *      purposes as it's not very well defined ...
 *      instead I use the lowest bit of the address so that:
 *        arc buffer:  .lnb_obj = abuf          (arc we loan for write)
 *        dbuf buffer: .lnb_obj = dbuf | 1      (dbuf we get for read)
 *        copy buffer: .lnb_page->mapping = obj (page we allocate for write)
 *
 *      bzzz, to blame
 */
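/*
 * Implementation of dt_body_operations::dbo_bufs_put.
 *
 * Release the per-page buffers obtained by osd_bufs_get(): free copy pages,
 * release pinned dbufs, and return loaned arc buffers, using the encoding
 * described above to tell them apart.
 */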
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj  = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        unsigned long      ptr;
        int                i;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_page == NULL)
                        continue;
                if (lnb[i].lnb_page->mapping == (void *)obj) {
                        /* this is an anonymous page allocated for copy-write */
                        lnb[i].lnb_page->mapping = NULL;
                        __free_page(lnb[i].lnb_page);
                        atomic_dec(&osd->od_zerocopy_alloc);
                } else {
                        /* see comment in osd_bufs_get_read() */
                        ptr = (unsigned long)lnb[i].lnb_data;
                        if (ptr & 1UL) {
                                ptr &= ~1UL;
                                dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
                                atomic_dec(&osd->od_zerocopy_pin);
                        } else if (lnb[i].lnb_data != NULL) {
                                dmu_return_arcbuf(lnb[i].lnb_data);
                                atomic_dec(&osd->od_zerocopy_loan);
                        }
                }
                lnb[i].lnb_page = NULL;
                lnb[i].lnb_data = NULL;
        }

        return 0;
}

static inline struct page *kmem_to_page(void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        else
                return virt_to_page(addr);
}

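/*
 * Prepare buffers for a read: hold the dbufs covering [off, off + len) and
 * expose the pages backing their data as local niobufs.  Each dbuf
 * reference is recorded once in lnb_data with the low bit set (see the
 * comment above osd_bufs_put()).  Returns the number of niobufs filled or
 * a negative errno.
 */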
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
                                loff_t off, ssize_t len, struct niobuf_local *lnb)
{
        struct osd_device *osd = osd_obj2dev(obj);
        dmu_buf_t        **dbp;
        int                rc, i, numbufs, npages = 0;
        ENTRY;

        /* grab buffers for read:
         * the OSD API lets us grab the buffers first, then initiate the IO(s)
         * so that all required IOs will be done in parallel, but at the
         * moment the DMU doesn't provide us with a method to grab buffers.
         * If we discover this is vital for good performance we
         * can implement our own replacement for dmu_buf_hold_array_by_bonus().
         */
        while (len > 0) {
                rc = -dmu_buf_hold_array_by_bonus(obj->oo_db, off, len, TRUE,
                                                  osd_zerocopy_tag, &numbufs,
                                                  &dbp);
                if (unlikely(rc))
                        GOTO(err, rc);

                for (i = 0; i < numbufs; i++) {
                        int bufoff, tocpy, thispage;
                        void *dbf = dbp[i];

                        LASSERT(len > 0);

                        atomic_inc(&osd->od_zerocopy_pin);

                        bufoff = off - dbp[i]->db_offset;
                        tocpy = min_t(int, dbp[i]->db_size - bufoff, len);

                        /* kind of trick to differentiate dbuf vs. arcbuf */
                        LASSERT(((unsigned long)dbp[i] & 1) == 0);
                        dbf = (void *) ((unsigned long)dbp[i] | 1);

                        while (tocpy > 0) {
                                thispage = PAGE_CACHE_SIZE;
                                thispage -= bufoff & (PAGE_CACHE_SIZE - 1);
                                thispage = min(tocpy, thispage);

                                lnb->lnb_rc = 0;
                                lnb->lnb_file_offset = off;
                                lnb->lnb_page_offset = bufoff & ~CFS_PAGE_MASK;
                                lnb->lnb_len = thispage;
                                lnb->lnb_page = kmem_to_page(dbp[i]->db_data +
                                                             bufoff);
                                /* mark just a single slot: we need this
                                 * reference to the dbuf to be released once */
                                lnb->lnb_data = dbf;
                                dbf = NULL;

                                tocpy -= thispage;
                                len -= thispage;
                                bufoff += thispage;
                                off += thispage;

                                npages++;
                                lnb++;
                        }

                        /* steal the dbuf so dmu_buf_rele_array() can't release it */
                        dbp[i] = NULL;
                }

                dmu_buf_rele_array(dbp, numbufs, osd_zerocopy_tag);
        }

        RETURN(npages);

err:
        LASSERT(rc < 0);
        osd_bufs_put(env, &obj->oo_dt, lnb - npages, npages);
        RETURN(rc);
}

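/*
 * Prepare buffers for a write: for full blocks loan arc buffers from the
 * DMU (true zerocopy, handed over later by dmu_assign_arcbuf() in
 * osd_write_commit()); for partial blocks allocate temporary pages that
 * will be copied with dmu_write().  Returns the number of niobufs filled
 * or a negative errno.
 */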
static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
                                loff_t off, ssize_t len, struct niobuf_local *lnb)
{
        struct osd_device *osd = osd_obj2dev(obj);
        int                plen, off_in_block, sz_in_block;
        int                rc, i = 0, npages = 0;
        arc_buf_t         *abuf;
        uint32_t           bs;
        uint64_t           dummy;
        ENTRY;

        dmu_object_size_from_db(obj->oo_db, &bs, &dummy);

        /*
         * currently only full blocks are subject to the zerocopy approach,
         * so that we're sure nobody is trying to update the same block
         */
        while (len > 0) {
                LASSERT(npages < PTLRPC_MAX_BRW_PAGES);

                off_in_block = off & (bs - 1);
                sz_in_block = min_t(int, bs - off_in_block, len);

                if (sz_in_block == bs) {
                        /* full block, try to use zerocopy */

                        abuf = dmu_request_arcbuf(obj->oo_db, bs);
                        if (unlikely(abuf == NULL))
                                GOTO(out_err, rc = -ENOMEM);

                        atomic_inc(&osd->od_zerocopy_loan);

                        /* go over the pages the arcbuf contains, exposing them
                         * as local niobufs for ptlrpc's bulks */
                        while (sz_in_block > 0) {
                                plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);

                                lnb[i].lnb_file_offset = off;
                                lnb[i].lnb_page_offset = 0;
                                lnb[i].lnb_len = plen;
                                lnb[i].lnb_rc = 0;
                                if (sz_in_block == bs)
                                        lnb[i].lnb_data = abuf;
                                else
                                        lnb[i].lnb_data = NULL;

                                /* this one is not supposed to fail */
                                lnb[i].lnb_page = kmem_to_page(abuf->b_data +
                                                        off_in_block);
                                LASSERT(lnb[i].lnb_page);

                                lprocfs_counter_add(osd->od_stats,
                                                LPROC_OSD_ZEROCOPY_IO, 1);

                                sz_in_block -= plen;
                                len -= plen;
                                off += plen;
                                off_in_block += plen;
                                i++;
                                npages++;
                        }
                } else {
                        if (off_in_block == 0 && len < bs &&
                                        off + len >= obj->oo_attr.la_size)
                                lprocfs_counter_add(osd->od_stats,
                                                LPROC_OSD_TAIL_IO, 1);

                        /* can't use zerocopy, allocate temp. buffers */
                        while (sz_in_block > 0) {
                                plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);

                                lnb[i].lnb_file_offset = off;
                                lnb[i].lnb_page_offset = 0;
                                lnb[i].lnb_len = plen;
                                lnb[i].lnb_rc = 0;
                                lnb[i].lnb_data = NULL;

                                lnb[i].lnb_page = alloc_page(OSD_GFP_IO);
                                if (unlikely(lnb[i].lnb_page == NULL))
                                        GOTO(out_err, rc = -ENOMEM);

                                LASSERT(lnb[i].lnb_page->mapping == NULL);
                                lnb[i].lnb_page->mapping = (void *)obj;

                                atomic_inc(&osd->od_zerocopy_alloc);
                                lprocfs_counter_add(osd->od_stats,
                                                LPROC_OSD_COPY_IO, 1);

                                sz_in_block -= plen;
                                len -= plen;
                                off += plen;
                                i++;
                                npages++;
                        }
                }
        }

        RETURN(npages);

out_err:
        osd_bufs_put(env, &obj->oo_dt, lnb, npages);
        RETURN(rc);
}

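/*
 * Implementation of dt_body_operations::dbo_bufs_get.
 *
 * Dispatch to osd_bufs_get_read() or osd_bufs_get_write() depending on the
 * direction of the bulk transfer.
 */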
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t offset, ssize_t len, struct niobuf_local *lnb,
                        int rw, struct lustre_capa *capa)
{
        struct osd_object *obj  = osd_dt_obj(dt);
        int                rc;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        if (rw == 0)
                rc = osd_bufs_get_read(env, obj, offset, len, lnb);
        else
                rc = osd_bufs_get_write(env, obj, offset, len, lnb);

        return rc;
}

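/*
 * Implementation of dt_body_operations::dbo_write_prep.
 *
 * Nothing to prepare on the ZFS OSD; the buffers set up by
 * osd_bufs_get_write() are used directly at commit time.
 */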
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        return 0;
}

/* Return the number of bytes in blocks that aren't mapped in the
 * [start, start + size) region */
static int osd_count_not_mapped(struct osd_object *obj, uint64_t start,
                                uint32_t size)
{
        dmu_buf_impl_t  *dbi = (dmu_buf_impl_t *)obj->oo_db;
        dmu_buf_impl_t  *db;
        dnode_t         *dn;
        uint32_t         blkshift;
        uint64_t         end, blkid;
        int              rc;
        ENTRY;

        DB_DNODE_ENTER(dbi);
        dn = DB_DNODE(dbi);

        if (dn->dn_maxblkid == 0) {
                if (start + size <= dn->dn_datablksz)
                        GOTO(out, size = 0);
                if (start < dn->dn_datablksz)
                        start = dn->dn_datablksz;
                /* assume largest block size */
                blkshift = SPA_MAXBLOCKSHIFT;
        } else {
                /* blocksize can't change */
                blkshift = dn->dn_datablkshift;
        }

        /* compute address of last block */
        end = (start + size - 1) >> blkshift;
        /* align start on block boundaries */
        start >>= blkshift;

        /* size is zero, nothing can be mapped */
        if (obj->oo_attr.la_size == 0 || dn->dn_maxblkid == 0)
                GOTO(out, size = (end - start + 1) << blkshift);

        /* beyond EOF, can't be mapped */
        if (start > dn->dn_maxblkid)
                GOTO(out, size = (end - start + 1) << blkshift);

        size = 0;
        for (blkid = start; blkid <= end; blkid++) {
                if (blkid == dn->dn_maxblkid)
                        /* this one is mapped for sure */
                        continue;
                if (blkid > dn->dn_maxblkid) {
                        size += (end - blkid + 1) << blkshift;
                        GOTO(out, size);
                }

                rc = dbuf_hold_impl(dn, 0, blkid, TRUE, FTAG, &db);
                if (rc) {
                        /* for ENOENT (block not mapped) and any other errors,
                         * assume the block isn't mapped */
                        size += 1 << blkshift;
                        continue;
                }
                dbuf_rele(db, FTAG);
        }

        GOTO(out, size);
out:
        DB_DNODE_EXIT(dbi);
        return size;
}

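/*
 * Implementation of dt_body_operations::dbo_declare_write_commit.
 *
 * Walk the niobufs, merge contiguous ranges into dmu_tx_hold_write()
 * declarations, estimate how many unmapped bytes the write will consume
 * via osd_count_not_mapped(), and reserve the corresponding quota space.
 */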
static int osd_declare_write_commit(const struct lu_env *env,
                                struct dt_object *dt,
                                struct niobuf_local *lnb, int npages,
                                struct thandle *th)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t            offset = 0;
        uint32_t            size = 0;
        int                 i, rc, flags = 0;
        bool                ignore_quota = false, synced = false;
        long long           space = 0;
        struct page        *last_page = NULL;
        unsigned long       discont_pages = 0;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        LASSERT(lnb);
        LASSERT(npages > 0);

        oh = container_of0(th, struct osd_thandle, ot_super);

        for (i = 0; i < npages; i++) {
                if (last_page && lnb[i].lnb_page->index != (last_page->index + 1))
                        ++discont_pages;
                last_page = lnb[i].lnb_page;
                if (lnb[i].lnb_rc)
                        /* ENOSPC, network RPC error, etc.
                         * We don't want to book space for pages which will be
                         * skipped in osd_write_commit(). Hence we skip pages
                         * with lnb_rc != 0 here too */
                        continue;
                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the checking
                 * for whether page is from cache can be simplified as:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        ignore_quota = true;
                if (size == 0) {
                        /* first valid lnb */
                        offset = lnb[i].lnb_file_offset;
                        size = lnb[i].lnb_len;
                        continue;
                }
                if (offset + size == lnb[i].lnb_file_offset) {
                        /* this lnb is contiguous to the previous one */
                        size += lnb[i].lnb_len;
                        continue;
                }

                dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
                                  offset, size);
                /* estimating space that will be consumed by a write is rather
                 * complicated with ZFS. As a consequence, we don't account for
                 * indirect blocks and quota overrun will be adjusted once the
                 * operation is committed, if required. */
                space += osd_count_not_mapped(obj, offset, size);

                offset = lnb[i].lnb_file_offset;
                size = lnb[i].lnb_len;
        }

        if (size) {
                dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
                                  offset, size);
                space += osd_count_not_mapped(obj, offset, size);
        }

        dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

        oh->ot_write_commit = 1; /* used in osd_trans_start() for fail_loc */

        /* backend zfs filesystem might be configured to store multiple data
         * copies */
        space  *= osd->od_os->os_copies;
        space   = toqb(space);
        CDEBUG(D_QUOTA, "writing %d pages, reserving "LPD64"K of quota "
               "space\n", npages, space);

        record_start_io(osd, WRITE, npages, discont_pages);
retry:
        /* acquire quota space if needed */
        rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                               obj->oo_attr.la_gid, space, oh, true, &flags,
                               ignore_quota);

        if (!synced && rc == -EDQUOT && (flags & QUOTA_FL_SYNC) != 0) {
                dt_sync(env, th->th_dev);
                synced = true;
                CDEBUG(D_QUOTA, "retry after sync\n");
                flags = 0;
                goto retry;
        }

        /* we only need to store the overquota flags in the first lnb for
         * now; once we support multi-object BRW, this code needs to be
         * revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;

        RETURN(rc);
}

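/*
 * Implementation of dt_body_operations::dbo_write_commit.
 *
 * Write out the prepared niobufs: copy pages go through dmu_write(), while
 * loaned arc buffers are handed over with dmu_assign_arcbuf().  Finally
 * extend the cached object size and SA_ZPL_SIZE if needed.
 */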
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages,
                        struct thandle *th)
{
        struct osd_object  *obj  = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t            new_size = 0;
        int                 i, rc = 0;
        unsigned long      iosize = 0;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);

        for (i = 0; i < npages; i++) {
                CDEBUG(D_INODE, "write %u bytes at %u\n",
                        (unsigned) lnb[i].lnb_len,
                        (unsigned) lnb[i].lnb_file_offset);

                if (lnb[i].lnb_rc) {
                        /* ENOSPC, network RPC error, etc.
                         * Unlike ldiskfs, zfs allocates new blocks on rewrite,
                         * so we skip this page if lnb_rc is set to -ENOSPC */
                        CDEBUG(D_INODE, "obj "DFID": skipping lnb[%u]: rc=%d\n",
                                PFID(lu_object_fid(&dt->do_lu)), i,
                                lnb[i].lnb_rc);
                        continue;
                }

                if (lnb[i].lnb_page->mapping == (void *)obj) {
                        dmu_write(osd->od_os, obj->oo_db->db_object,
                                lnb[i].lnb_file_offset, lnb[i].lnb_len,
                                kmap(lnb[i].lnb_page), oh->ot_tx);
                        kunmap(lnb[i].lnb_page);
                } else if (lnb[i].lnb_data) {
                        LASSERT(((unsigned long)lnb[i].lnb_data & 1) == 0);
                        /* buffer loaned for zerocopy, try to use it.
                         * notice that dmu_assign_arcbuf() is smart
                         * enough to recognize a changed blocksize,
                         * in which case it falls back to dmu_write() */
                        dmu_assign_arcbuf(obj->oo_db, lnb[i].lnb_file_offset,
                                          lnb[i].lnb_data, oh->ot_tx);
                        /* drop the reference, otherwise osd_bufs_put()
                         * will be releasing it - bad! */
                        lnb[i].lnb_data = NULL;
                        atomic_dec(&osd->od_zerocopy_loan);
                }

                if (new_size < lnb[i].lnb_file_offset + lnb[i].lnb_len)
                        new_size = lnb[i].lnb_file_offset + lnb[i].lnb_len;
                iosize += lnb[i].lnb_len;
        }

        if (unlikely(new_size == 0)) {
                /* no pages to write, no transno is needed */
                th->th_local = 1;
                /* it is important to return 0 even when all lnb_rc == -ENOSPC
                 * since ofd_commitrw_write() retries several times on ENOSPC */
                record_end_io(osd, WRITE, 0, 0);
                RETURN(0);
        }

        write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < new_size) {
                obj->oo_attr.la_size = new_size;
                write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() will be copying directly from
                 * oo_attr into dbuf. any update within a single txg will copy
                 * the most recent value */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
                                          &obj->oo_attr.la_size, 8, oh);
        } else {
                write_unlock(&obj->oo_attr_lock);
        }

        record_end_io(osd, WRITE, 0, iosize);

        RETURN(rc);
}

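/*
 * Implementation of dt_body_operations::dbo_read_prep.
 *
 * Fill each niobuf page with data by calling osd_read() on the mapped page,
 * recording the per-page result in lnb_rc.
 */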
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj  = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct lu_buf      buf;
        loff_t             offset;
        int                i;
        unsigned long      start;
        unsigned long      size = 0;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        start = cfs_time_current();

        record_start_io(osd, READ, npages, 0);

        for (i = 0; i < npages; i++) {
                buf.lb_buf = kmap(lnb[i].lnb_page);
                buf.lb_len = lnb[i].lnb_len;
                offset = lnb[i].lnb_file_offset;

                CDEBUG(D_OTHER, "read %u bytes at %u\n",
                        (unsigned) lnb[i].lnb_len,
                        (unsigned) lnb[i].lnb_file_offset);
                lnb[i].lnb_rc = osd_read(env, dt, &buf, &offset, NULL);
                kunmap(lnb[i].lnb_page);

                size += lnb[i].lnb_rc;

                if (lnb[i].lnb_rc < buf.lb_len) {
                        /* all subsequent rc should be 0 */
                        while (++i < npages)
                                lnb[i].lnb_rc = 0;
                        break;
                }
        }

        record_end_io(osd, READ, cfs_time_current() - start, size);

        return 0;
}

/*
 * Punch/truncate an object
 *
 *      IN:     db  - dmu_buf of the object to free data in.
 *              off - start of section to free.
 *              len - length of section to free (DMU_OBJECT_END => to EOF).
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_sa() and if off < size, dmu_tx_hold_free()
 * called and then assigned to a transaction group.
 */
static int __osd_object_punch(objset_t *os, dmu_buf_t *db, dmu_tx_t *tx,
                                uint64_t size, uint64_t off, uint64_t len)
{
        int rc = 0;

        /* Assert that the transaction has been assigned to a
           transaction group. */
        LASSERT(tx->tx_txg != 0);
        /*
         * Nothing to do if file already at desired length.
         */
        if (len == DMU_OBJECT_END && size == off)
                return 0;

        if (off < size)
                rc = -dmu_free_range(os, db->db_object, off, len, tx);

        return rc;
}

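/*
 * Implementation of dt_body_operations::dbo_punch.
 *
 * Free the requested range with __osd_object_punch(); for a truncate (end
 * at or beyond EOF) also update the cached size and SA_ZPL_SIZE.
 */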
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
                        __u64 start, __u64 end, struct thandle *th,
                        struct lustre_capa *capa)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        __u64               len;
        int                 rc = 0;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));

        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);

        write_lock(&obj->oo_attr_lock);
        /* truncate */
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;
        write_unlock(&obj->oo_attr_lock);

        rc = __osd_object_punch(osd->od_os, obj->oo_db, oh->ot_tx,
                                obj->oo_attr.la_size, start, len);
        /* set new size */
        if (len == DMU_OBJECT_END) {
                write_lock(&obj->oo_attr_lock);
                obj->oo_attr.la_size = start;
                write_unlock(&obj->oo_attr_lock);
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
                                          &obj->oo_attr.la_size, 8, oh);
        }
        RETURN(rc);
}

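/*
 * Implementation of dt_body_operations::dbo_declare_punch.
 *
 * Declare the dmu_tx_hold_free() for the punched range (when blocks will
 * actually be freed) and the SA hold for the size update, then reserve
 * quota.
 */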
static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
                        __u64 start, __u64 end, struct thandle *handle)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        __u64               len;
        ENTRY;

        oh = container_of0(handle, struct osd_thandle, ot_super);

        read_lock(&obj->oo_attr_lock);
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;

        /* declare we'll free some blocks ... */
        if (start < obj->oo_attr.la_size) {
                read_unlock(&obj->oo_attr_lock);
                dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object, start, len);
        } else {
                read_unlock(&obj->oo_attr_lock);
        }

        /* ... and we'll modify size attribute */
        dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

        RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
                                 obj->oo_attr.la_gid, 0, oh, true, NULL,
                                 false));
}


struct dt_body_operations osd_body_ops = {
        .dbo_read                       = osd_read,
        .dbo_declare_write              = osd_declare_write,
        .dbo_write                      = osd_write,
        .dbo_bufs_get                   = osd_bufs_get,
        .dbo_bufs_put                   = osd_bufs_put,
        .dbo_write_prep                 = osd_write_prep,
        .dbo_declare_write_commit       = osd_declare_write_commit,
        .dbo_write_commit               = osd_write_commit,
        .dbo_read_prep                  = osd_read_prep,
        .dbo_declare_punch              = osd_declare_punch,
        .dbo_punch                      = osd_punch,
};