LU-1883 osd: Fix niobuf_local offset usage
lustre/osd-ldiskfs/osd_io.c (fs/lustre-release.git)
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

/* LUSTRE_VERSION_CODE */
#include <lustre_ver.h>
/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>

/* ext_depth() */
#include <ldiskfs/ldiskfs.h>
#include <ldiskfs/ldiskfs_jbd2.h>
#include <ldiskfs/ldiskfs_extents.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 * OBD_FAIL_CHECK
 */
#include <obd_support.h>

#include "osd_internal.h"

#ifndef HAVE_PAGE_CONSTANT
#define mapping_cap_page_constant_write(mapping) 0
#define SetPageConstant(page) do {} while (0)
#define ClearPageConstant(page) do {} while (0)
#endif

#ifndef HAS_GENERIC_ERROR_REMOVE_PAGE
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
        if (mapping == NULL)
                return -EINVAL;

        if (mapping != page->mapping)
                return -EIO;
        /*
         * Only punch for normal data pages for now.
         * Handling other types like directories would need more auditing.
         */
        if (!S_ISREG(mapping->host->i_mode))
                return -EIO;

        if (page_mapped(page)) {
                unmap_mapping_range(mapping,
                                    (loff_t)page->index << PAGE_CACHE_SHIFT,
                                    PAGE_CACHE_SIZE, 0);
        }
        truncate_complete_page(mapping, page);
        return 0;
}
#endif

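/*
 * Lifecycle of an osd_iobuf (descriptive note): osd_init_iobuf() resets the
 * buffer for one batch of I/O, osd_iobuf_add_page() queues the pages that
 * actually need disk I/O, osd_do_bio() maps them into bios and submits them,
 * and osd_fini_iobuf() folds the elapsed time into the brw stats once all
 * bios have completed.
 */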
static void osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                           int rw)
{
        cfs_waitq_init(&iobuf->dr_wait);
        cfs_atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_max_pages = PTLRPC_MAX_BRW_PAGES;
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = 0;
        /* must have been counted before, so assert */
        LASSERT(iobuf->dr_elapsed_valid == 0);
        iobuf->dr_rw = rw;
}

static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;
}

void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.
                                 hist[BRW_R_DIO_FRAGS+rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
                                      iobuf->dr_elapsed);
        }
}

#ifdef HAVE_BIO_ENDIO_2ARG
#define DIO_RETURN(a)
static void dio_complete_routine(struct bio *bio, int error)
#else
#define DIO_RETURN(a)   return(a)
static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
#endif
{
        struct osd_iobuf *iobuf = bio->bi_private;
        struct bio_vec *bvl;
        int i;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to <http://jira.whamcloud.com/>, along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps).  Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
                       "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d, "
                       "bi_private: %p\n", bio->bi_next, bio->bi_flags,
                       bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
                       bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
                       bio->bi_private);
                DIO_RETURN(0);
        }

        /* the check is outside of the loop for performance reasons -bzzz */
        if (!cfs_test_bit(BIO_RW, &bio->bi_rw)) {
                bio_for_each_segment(bvl, bio, i) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl->bv_page);
                        LASSERT(PageLocked(bvl->bv_page));
                        ClearPageConstant(bvl->bv_page);
                }
                cfs_atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                struct page *p = iobuf->dr_pages[0];
                if (p->mapping) {
                        if (mapping_cap_page_constant_write(p->mapping)) {
                                bio_for_each_segment(bvl, bio, i) {
                                        ClearPageConstant(bvl->bv_page);
                                }
                        }
                }
                cfs_atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs)) {
                iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
                iobuf->dr_elapsed_valid = 1;
                cfs_waitq_signal(&iobuf->dr_wait);
        }

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as they
         * complete, so the pool cannot be exhausted while IOs are competing.
         * bug 10076 */
        bio_put(bio);
        DIO_RETURN(0);
}

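/*
 * record_start_io() and dio_complete_routine() pair up: each submitted bio
 * bumps dr_numreqs and the od_{r,w}_in_flight counter; each completion
 * decrements them, and the final completion records dr_elapsed and wakes
 * the waiter on dr_wait.
 */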
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device    *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        cfs_atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                cfs_atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 cfs_atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                cfs_atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 cfs_atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
}

static int can_be_merged(struct bio *bio, sector_t sector)
{
        unsigned int size;

        if (!bio)
                return 0;

        size = bio->bi_size >> 9;
        return bio->bi_sector + size == sector ? 1 : 0;
}

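/*
 * Illustrative example (values assumed, not from the source): a bio with
 * bi_sector = 8 and bi_size = 4096 covers 4096 >> 9 = 8 sectors, i.e.
 * sectors [8, 16), so can_be_merged() returns 1 only for sector == 16,
 * the sector immediately past the bio's current tail.
 *
 * osd_do_bio() below walks the iobuf's page array block by block: holes
 * are zero-filled (reads only), runs of contiguous disk blocks are
 * coalesced, each run is merged into the current bio when adjacent, and
 * otherwise the current bio is submitted and a new one allocated.
 */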
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int            blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        struct page  **pages = iobuf->dr_pages;
        int            npages = iobuf->dr_npages;
        unsigned long *blocks = iobuf->dr_blocks;
        int            total_blocks = npages * blocks_per_page;
        int            sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int   blocksize = inode->i_sb->s_blocksize;
        struct bio    *bio = NULL;
        struct page   *page;
        unsigned int   page_offset;
        sector_t       sector;
        int            nblocks;
        int            block_idx;
        int            page_idx;
        int            i;
        int            rc = 0;
        ENTRY;

        LASSERT(iobuf->dr_npages == npages);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = cfs_time_current();

        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        /* Only mark the page constant if it is mapped to
                         * contiguous underlying disk blocks.  This ensures
                         * the corresponding RAID5 device cache is
                         * overwritten by this page. - jay */
                        if (iobuf->dr_rw && (nblocks == blocks_per_page) &&
                            mapping_cap_page_constant_write(inode->i_mapping))
                                SetPageConstant(page);

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q =
                                        bdev_get_queue(bio->bi_bdev);

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bio->bi_size,
                                       bio->bi_vcnt, bio->bi_max_vecs,
                                       bio->bi_size >> 9, queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       bio_hw_segments(q, bio),
                                       queue_max_hw_segments(q));

                                record_start_io(iobuf, bio->bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        /* allocate new bio, limited by max BIO size, b=9945 */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio->bi_sector = sector;
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                record_start_io(iobuf, bio->bi_size);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

 out:
        /* In order to achieve better IO throughput, we don't wait for write
         * completion here.  Instead we proceed with transaction commit in
         * parallel and wait for IO completion once the transaction is
         * stopped; see osd_trans_stop() for more details. -bzzz */
        if (iobuf->dr_rw == 0) {
                cfs_wait_event(iobuf->dr_wait,
                               cfs_atomic_read(&iobuf->dr_numreqs) == 0);
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}

static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb)
{
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (CFS_PAGE_SIZE - 1);
                int plen = CFS_PAGE_SIZE - poff;

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->len = plen;
                /* lb->flags = rnb->flags; */
                lnb->flags = 0;
                lnb->page = NULL;
                lnb->rc = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(0);
}
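
/*
 * Worked example (illustrative, assuming CFS_PAGE_SIZE == 4096):
 * osd_map_remote_to_local(offset = 6144, len = 10240, ...) yields
 * *nrpages = 3 with
 *   lnb[0]: lnb_file_offset = 6144,  lnb_page_offset = 2048, len = 2048
 *   lnb[1]: lnb_file_offset = 8192,  lnb_page_offset = 0,    len = 4096
 *   lnb[2]: lnb_file_offset = 12288, lnb_page_offset = 0,    len = 4096
 */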

struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw)
{
        struct inode      *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page       *page;

        LASSERT(inode);

        page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
                                   GFP_NOFS | __GFP_HIGHMEM);
        if (unlikely(page == NULL))
                lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);

        return page;
}

/*
 * There are the following "locks":
 *
 *    journal_start
 *    i_alloc_sem
 *    i_mutex
 *    page lock
 *
 * osd write path:
 *    lock page(s)
 *    journal_start
 *    truncate_sem
 *
 * ext4 vmtruncate:
 *    lock pages, unlock
 *    journal_start
 *    lock partial page
 *    i_data_sem
 */
int osd_bufs_get(const struct lu_env *env, struct dt_object *d, loff_t pos,
                 ssize_t len, struct niobuf_local *lnb, int rw,
                 struct lustre_capa *capa)
{
        struct osd_object   *obj    = osd_dt_obj(d);
        int npages, i, rc = 0;

        LASSERT(obj->oo_inode);

        osd_map_remote_to_local(pos, len, &npages, lnb);

        for (i = 0; i < npages; i++, lnb++) {

                /* We still set up for ungranted pages so that granted pages
                 * can be written to disk as they were promised, and Portals
                 * needs to keep the pages all aligned properly. */
                lnb->dentry = (void *) obj;

                lnb->page = osd_get_page(d, lnb->lnb_file_offset, rw);
                if (lnb->page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                /* DLM locking protects us from write and truncate competing
                 * for the same region, but truncate can leave a dirty page in
                 * the cache.  It's possible writeout on such a page is in
                 * progress when we access it.  It's also possible that during
                 * this writeout we put new (partial) data into the page, but
                 * then won't be able to proceed in filter_commitrw_write().
                 * Thus let's just wait for writeout completion; it should be
                 * rare enough. -bzzz */
                wait_on_page_writeback(lnb->page);
                BUG_ON(PageWriteback(lnb->page));

                lu_object_get(&d->do_lu);
        }
        rc = i;

cleanup:
        RETURN(rc);
}
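
/*
 * Note: each page set up above holds one lu_object reference; the matching
 * lu_object_put() is done per page in osd_bufs_put() below.
 */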

static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct osd_device      *d = osd_obj2dev(osd_dt_obj(dt));
        int                     i;

        /* Do the IO stats here, because osd_do_bio() doesn't wait for
         * writes to complete */
        osd_fini_iobuf(d, iobuf);

        for (i = 0; i < npages; i++) {
                if (lnb[i].page == NULL)
                        continue;
                LASSERT(PageLocked(lnb[i].page));
                unlock_page(lnb[i].page);
                page_cache_release(lnb[i].page);
                lu_object_put(env, &dt->do_lu);
                lnb[i].page = NULL;
        }
        RETURN(0);
}

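/*
 * Prepare pages for a bulk write.  A partial page that overlaps data already
 * on disk (index <= maxidx) must be read in first so the bytes outside
 * [lnb_page_offset, lnb_page_offset + len) are preserved (read-modify-write);
 * a partial page entirely beyond EOF only needs its head and tail zeroed.
 * Full pages need neither.
 */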
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        struct timeval          start;
        struct timeval          end;
        unsigned long           timediff;
        ssize_t                 isize;
        __s64                   maxidx;
        int                     rc = 0;
        int                     i;
        int                     cache = 0;

        LASSERT(inode);

        osd_init_iobuf(osd, iobuf, 0);

        isize = i_size_read(inode);
        maxidx = ((isize + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        cfs_gettimeofday(&start);
        for (i = 0; i < npages; i++) {

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].page);

                /*
                 * Till commit, the content of the page is undefined; we'll
                 * set it uptodate once bulk is done.  Otherwise subsequent
                 * reads could access non-stable data.
                 */
                ClearPageUptodate(lnb[i].page);

                if (lnb[i].len == CFS_PAGE_SIZE)
                        continue;

                if (maxidx >= lnb[i].page->index) {
                        osd_iobuf_add_page(iobuf, lnb[i].page);
                } else {
                        long off;
                        char *p = kmap(lnb[i].page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].len) &
                              ~CFS_PAGE_MASK;
                        if (off)
                                memset(p + off, 0, CFS_PAGE_SIZE - off);
                        kunmap(lnb[i].page);
                }
        }
        cfs_gettimeofday(&end);
        timediff = cfs_timeval_sub(&end, &start, NULL);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
                                                       iobuf->dr_npages,
                                                       iobuf->dr_blocks,
                                                       oti->oti_created,
                                                       0, NULL);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}

/* Check if a block is allocated or not */
static int osd_is_mapped(struct inode *inode, obd_size offset)
{
        sector_t (*fs_bmap)(struct address_space *, sector_t);

        fs_bmap = inode->i_mapping->a_ops->bmap;

        /* We can't know if we are overwriting or not */
        if (unlikely(fs_bmap == NULL))
                return 0;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) <
            (offset >> inode->i_blkbits))
                return 0;

        if (fs_bmap(inode->i_mapping, offset >> inode->i_blkbits) == 0)
                return 0;

        return 1;
}
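
/*
 * Note: ->bmap() maps a logical file block to its physical block number and
 * returns 0 for an unallocated hole, so osd_is_mapped() treats "beyond EOF"
 * and "bmap() == 0" alike: such a write allocates a new block and must be
 * charged to quota by the caller below.
 */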

static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        int                      extents = 1;
        int                      depth;
        int                      i;
        int                      newblocks;
        int                      rc = 0;
        int                      flags = 0;
        bool                     ignore_quota = false;
        long long                quota_space = 0;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        newblocks = npages;

        /* calculate number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                if (i && lnb[i].lnb_file_offset !=
                    lnb[i - 1].lnb_file_offset + lnb[i - 1].len)
                        extents++;

                if (!osd_is_mapped(inode, lnb[i].lnb_file_offset))
                        quota_space += CFS_PAGE_SIZE;

                /* Ignore quota for the whole request if any page is from
                 * the client cache or written by root.
                 *
                 * XXX we could handle this on a per-lnb basis as done by
                 * grant. */
                if ((lnb[i].flags & OBD_BRW_NOQUOTA) ||
                    !(lnb[i].flags & OBD_BRW_SYNC))
                        ignore_quota = true;
        }

        /*
         * Each extent can go into a new leaf, causing a split.
         * 5 is the max tree depth: inode + 4 index blocks.
         * With block maps, depth is 3 at most.
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * Many concurrent threads may grow the tree by the time
                 * our transaction starts, so consider 2 the minimum depth.
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                oh->ot_credits++; /* inode */
                oh->ot_credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                oh->ot_credits++; /* inode */
                oh->ot_credits += depth * extents;
        }

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);

        /* each new block can go in a different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                oh->ot_credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                oh->ot_credits += newblocks;

        /* we can't dirty more gd blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                oh->ot_credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                oh->ot_credits += newblocks;
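
        /*
         * Worked example (illustrative, values assumed): for an extent-mapped
         * inode with ext_depth() == 1, depth = max(1, 1) + 1 = 2.  Writing 8
         * unmapped, contiguous pages (extents = 1) gives newblocks = 8 + 2
         * and adds 1 (inode) + 2 * 2 * 1 (tree) + 10 (bitmaps) + 10 (group
         * descriptors) = 25 credits, assuming the group counts don't clamp.
         */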

        /* make sure the over-quota flags were not set */
        lnb[0].flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA);

        rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid,
                                   quota_space, oh, true, true, &flags,
                                   ignore_quota);

        /* We only need to store the over-quota flags in the first lnb for
         * now; once we support multi-object BRW, this code will need to
         * be revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].flags |= OBD_BRW_OVER_GRPQUOTA;

        RETURN(rc);
}

static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *thandle)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
        loff_t isize;
        int rc = 0, i;

        LASSERT(inode);

        osd_init_iobuf(osd, iobuf, 1);
        isize = i_size_read(inode);
        ll_vfs_dq_init(inode);

        for (i = 0; i < npages; i++) {
                if (lnb[i].rc == -ENOSPC &&
                    osd_is_mapped(inode, lnb[i].lnb_file_offset)) {
                        /* Allow the write to proceed if overwriting an
                         * existing block */
                        lnb[i].rc = 0;
                }

                if (lnb[i].rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
                               lnb[i].rc);
                        LASSERT(lnb[i].page);
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].page);
                        continue;
                }

                LASSERT(PageLocked(lnb[i].page));
                LASSERT(!PageWriteback(lnb[i].page));

                if (lnb[i].lnb_file_offset + lnb[i].len > isize)
                        isize = lnb[i].lnb_file_offset + lnb[i].len;

                /*
                 * Since write and truncate are serialized by oo_sem, even
                 * partial-page truncate should not leave dirty pages in the
                 * page cache.
                 */
                LASSERT(!PageDirty(lnb[i].page));

                SetPageUptodate(lnb[i].page);

                osd_iobuf_add_page(iobuf, lnb[i].page);
        }

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
                rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
                                                       iobuf->dr_npages,
                                                       iobuf->dr_blocks,
                                                       oti->oti_created,
                                                       1, NULL);
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
        }

        if (likely(rc == 0)) {
                if (isize > i_size_read(inode)) {
                        i_size_write(inode, isize);
                        LDISKFS_I(inode)->i_disksize = isize;
                        inode->i_sb->s_op->dirty_inode(inode);
                }

                rc = osd_do_bio(osd, inode, iobuf);
                /* We don't do stats here as in the read path, because
                 * write is async; we'll do this in osd_bufs_put() */
        }

        if (unlikely(rc != 0)) {
                /* if write fails, we should drop pages from the cache */
                for (i = 0; i < npages; i++) {
                        if (lnb[i].page == NULL)
                                continue;
                        LASSERT(PageLocked(lnb[i].page));
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].page);
                }
        }

        RETURN(rc);
}

static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                         struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct timeval start, end;
        unsigned long timediff;
        int rc = 0, i, m = 0, cache = 0;

        LASSERT(inode);

        osd_init_iobuf(osd, iobuf, 0);

        if (osd->od_read_cache)
                cache = 1;
        if (i_size_read(inode) > osd->od_readcache_max_filesize)
                cache = 0;

        cfs_gettimeofday(&start);
        for (i = 0; i < npages; i++) {

                if (i_size_read(inode) <= lnb[i].lnb_file_offset)
                        /* If there's no more data, abort early.
                         * lnb->rc == 0, so it's easy to detect later. */
                        break;

                if (i_size_read(inode) <
                    lnb[i].lnb_file_offset + lnb[i].len - 1)
                        lnb[i].rc = i_size_read(inode) - lnb[i].lnb_file_offset;
                else
                        lnb[i].rc = lnb[i].len;
                m += lnb[i].len;

                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS, 1);
                if (PageUptodate(lnb[i].page)) {
                        lprocfs_counter_add(osd->od_stats,
                                            LPROC_OSD_CACHE_HIT, 1);
                } else {
                        lprocfs_counter_add(osd->od_stats,
                                            LPROC_OSD_CACHE_MISS, 1);
                        osd_iobuf_add_page(iobuf, lnb[i].page);
                }
                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].page);
        }
        cfs_gettimeofday(&end);
        timediff = cfs_timeval_sub(&end, &start, NULL);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
                                                       iobuf->dr_npages,
                                                       iobuf->dr_blocks,
                                                       oti->oti_created,
                                                       0, NULL);
                if (likely(rc == 0))
                        rc = osd_do_bio(osd, inode, iobuf);

                /* IO stats will be done in osd_bufs_put() */
        }

        RETURN(rc);
}

/*
 * XXX: Another layering violation for now.
 *
 * We don't want to use ->f_op->read methods, because generic file write
 *
 *         - serializes on ->i_sem, and
 *
 *         - does a lot of extra work like balance_dirty_pages(),
 *
 * which doesn't work for globally shared files like /last_rcvd.
 */
static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);

        memcpy(buffer, (char *)ei->i_data, buflen);

        return buflen;
}

int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
        struct buffer_head *bh;
        unsigned long block;
        int osize;
        int blocksize;
        int csize;
        int boffs;
        int err;

        /* prevent reading after eof */
        cfs_spin_lock(&inode->i_lock);
        if (i_size_read(inode) < *offs + size) {
                loff_t diff = i_size_read(inode) - *offs;
                cfs_spin_unlock(&inode->i_lock);
                if (diff < 0) {
                        CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
                               i_size_read(inode), *offs);
                        return -EBADR;
                } else if (diff == 0) {
                        return 0;
                } else {
                        size = diff;
                }
        } else {
                cfs_spin_unlock(&inode->i_lock);
        }

        blocksize = 1 << inode->i_blkbits;
        osize = size;
        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ldiskfs_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("%s: can't read %u@%llu on ino %lu: rc = %d\n",
                               LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
                               csize, *offs, inode->i_ino, err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return osize;
}
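
/*
 * Worked example (illustrative, assuming a 4096-byte blocksize):
 * osd_ldiskfs_read(inode, buf, size = 6000, *offs = 5000) is split into
 *   block 1, boffs 904, csize 3192
 *   block 2, boffs 0,   csize 2808
 * and returns osize = 6000 with *offs advanced to 11000 (assuming the file
 * is large enough that size is not clipped at EOF).
 */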

static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
                        struct lu_buf *buf, loff_t *pos,
                        struct lustre_capa *capa)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        int           rc;

        if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
                RETURN(-EACCES);

        /* Read a small symlink from the inode body, as we need to maintain
         * correct on-disk symlinks for ldiskfs.
         */
        if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
            (buf->lb_len <= sizeof(LDISKFS_I(inode)->i_data)))
                rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
        else
                rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);

        return rc;
}

static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
                                 const loff_t size, loff_t pos,
                                 struct thandle *handle)
{
        struct osd_thandle *oh;
        int                 credits;
        struct inode       *inode;
        int                 rc;
        ENTRY;

        LASSERT(handle != NULL);

        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        /* XXX: size == 0 or INT_MAX indicates a catalog header update or
         *      llog write; see the comment in mdd_declare_llog_record().
         *
         *      This hack will be removed once llog over OSD lands.
         */
        if (size == DECLARE_LLOG_REWRITE)
                credits = 2;
        else if (size == DECLARE_LLOG_WRITE)
                credits = 6;
        else
                credits = osd_dto_credits_noquota[DTO_WRITE_BLOCK];

        OSD_DECLARE_OP(oh, write);
        oh->ot_credits += credits;

        inode = osd_dt_obj(dt)->oo_inode;

        /* we may declare a write to a non-existent llog */
        if (inode == NULL)
                RETURN(0);

        /* dt_declare_write() is usually called for system objects, such
         * as llog or last_rcvd files.  We needn't enforce quota on those
         * objects, so always set lqi_space to 0. */
        rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
                                   true, true, NULL, false);
        RETURN(rc);
}

static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
{
        memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
        LDISKFS_I(inode)->i_disksize = buflen;
        i_size_write(inode, buflen);
        inode->i_sb->s_op->dirty_inode(inode);

        return 0;
}

int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
                             int write_NUL, loff_t *offs, handle_t *handle)
{
        struct buffer_head *bh        = NULL;
        loff_t              offset    = *offs;
        loff_t              new_size  = i_size_read(inode);
        unsigned long       block;
        int                 blocksize = 1 << inode->i_blkbits;
        int                 err = 0;
        int                 size;
        int                 boffs;
        int                 dirty_inode = 0;

        if (write_NUL) {
                /*
                 * A long symlink write does not count the NUL terminator in
                 * bufsize; we write it out anyway, and the inode's file size
                 * does not count the NUL terminator either.
                 */
                ((char *)buf)[bufsize] = '\0';
                ++bufsize;
        }
        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                bh = ldiskfs_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("%s: error reading offset %llu (block %lu): "
                               "rc = %d\n",
                               inode->i_sb->s_id, offset, block, err);
                        break;
                }

                err = ldiskfs_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        break;
                }
                LASSERTF(boffs + size <= bh->b_size,
                         "boffs %d size %d bh->b_size %lu",
                         boffs, size, (unsigned long)bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ldiskfs_journal_dirty_metadata(handle, bh);
                if (err)
                        break;

                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                bufsize -= size;
                buf += size;
        }
        if (bh)
                brelse(bh);

        if (write_NUL)
                --new_size;
        /* correct in-core and on-disk sizes */
        if (new_size > i_size_read(inode)) {
                cfs_spin_lock(&inode->i_lock);
                if (new_size > i_size_read(inode))
                        i_size_write(inode, new_size);
                if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
                        LDISKFS_I(inode)->i_disksize = i_size_read(inode);
                        dirty_inode = 1;
                }
                cfs_spin_unlock(&inode->i_lock);
                if (dirty_inode)
                        inode->i_sb->s_op->dirty_inode(inode);
        }

        if (err == 0)
                *offs = offset;
        return err;
}
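
/*
 * Example (illustrative): writing a 10-byte long-symlink target with
 * write_NUL set stores 11 bytes on disk (payload plus '\0'), while new_size
 * is decremented afterwards so i_size remains 10.
 */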

static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
                         const struct lu_buf *buf, loff_t *pos,
                         struct thandle *handle, struct lustre_capa *capa,
                         int ignore_quota)
{
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        ssize_t                 result;
#ifdef HAVE_QUOTA_SUPPORT
        cfs_cap_t               save = cfs_curproc_cap_pack();
#endif
        int                     is_link;

        LASSERT(dt_object_exists(dt));

        if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_WRITE))
                return -EACCES;

        LASSERT(handle != NULL);

        /* XXX: don't check: one declared chunk can be used many times */
        /* OSD_EXEC_OP(handle, write); */

        oh = container_of(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle->h_transaction != NULL);
#ifdef HAVE_QUOTA_SUPPORT
        if (ignore_quota)
                cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
        else
                cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
#endif
        /* Write a small symlink to the inode body, as we need to maintain
         * correct on-disk symlinks for ldiskfs.
         * Note: buf->lb_buf contains a NUL terminator while buf->lb_len
         * does not count it.
         */
        is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
        if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
                result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
        else
                result = osd_ldiskfs_write_record(inode, buf->lb_buf,
                                                  buf->lb_len, is_link, pos,
                                                  oh->ot_handle);
#ifdef HAVE_QUOTA_SUPPORT
        cfs_curproc_cap_unpack(save);
#endif
        if (result == 0)
                result = buf->lb_len;
        return result;
}

static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
                             __u64 start, __u64 end, struct thandle *th)
{
        struct osd_thandle *oh;
        struct inode       *inode;
        int                 rc;
        ENTRY;

        LASSERT(th);
        oh = container_of(th, struct osd_thandle, ot_super);

        OSD_DECLARE_OP(oh, punch);

        /*
         * We don't reserve credits for the whole truncate; that isn't
         * possible, as truncate may need to free too many blocks to fit
         * in a single transaction.  Instead we reserve credits to change
         * i_size and put the inode onto the orphan list; if needed,
         * truncate will extend or restart the transaction.
         */
        oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
        oh->ot_credits += 3;

        inode = osd_dt_obj(dt)->oo_inode;
        LASSERT(inode);

        rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
                                   true, true, NULL, false);
        RETURN(rc);
}

static int osd_punch(const struct lu_env *env, struct dt_object *dt,
                     __u64 start, __u64 end, struct thandle *th,
                     struct lustre_capa *capa)
{
        struct osd_thandle *oh;
        struct osd_object  *obj = osd_dt_obj(dt);
        struct inode       *inode = obj->oo_inode;
        handle_t           *h;
        tid_t               tid;
        int                 rc, rc2 = 0;
        ENTRY;

        LASSERT(end == OBD_OBJECT_EOF);
        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));

        LASSERT(th);
        oh = container_of(th, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle->h_transaction != NULL);

        OSD_EXEC_OP(th, punch);

        tid = oh->ot_handle->h_transaction->t_tid;

        rc = vmtruncate(inode, start);

        /*
         * For a partial-page truncate, flush the page to disk immediately to
         * avoid data corruption during direct disk write.  b=17397
         */
        if (rc == 0 && (start & ~CFS_PAGE_MASK) != 0)
                rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);

        h = journal_current_handle();
        LASSERT(h != NULL);
        LASSERT(h == oh->ot_handle);

        if (tid != h->h_transaction->t_tid) {
                int credits = oh->ot_credits;
                /*
                 * The transaction has changed during the truncate; we need
                 * to restart the handle with our credits.
                 */
                if (h->h_buffer_credits < credits) {
                        if (ldiskfs_journal_extend(h, credits))
                                rc2 = ldiskfs_journal_restart(h, credits);
                }
        }

        RETURN(rc == 0 ? rc2 : rc);
}

static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
                          struct ll_user_fiemap *fm)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thread_info *info   = osd_oti_get(env);
        struct dentry          *dentry = &info->oti_obj_dentry;
        struct file            *file   = &info->oti_file;
        mm_segment_t            saved_fs;
        int rc;

        LASSERT(inode);
        dentry->d_inode = inode;
        file->f_dentry = dentry;
        file->f_mapping = inode->i_mapping;
        file->f_op = inode->i_fop;

        saved_fs = get_fs();
        set_fs(get_ds());
        /* ldiskfs_ioctl does not have an inode argument */
        if (inode->i_fop->unlocked_ioctl)
                rc = inode->i_fop->unlocked_ioctl(file, FSFILT_IOC_FIEMAP,
                                                  (long)fm);
        else
                rc = -ENOTTY;
        set_fs(saved_fs);
        return rc;
}

/*
 * In some cases we may need declare methods for objects being created,
 * e.g., when we create a symlink.
 */
const struct dt_body_operations osd_body_ops_new = {
        .dbo_declare_write = osd_declare_write,
};

const struct dt_body_operations osd_body_ops = {
        .dbo_read                 = osd_read,
        .dbo_declare_write        = osd_declare_write,
        .dbo_write                = osd_write,
        .dbo_bufs_get             = osd_bufs_get,
        .dbo_bufs_put             = osd_bufs_put,
        .dbo_write_prep           = osd_write_prep,
        .dbo_declare_write_commit = osd_declare_write_commit,
        .dbo_write_commit         = osd_write_commit,
        .dbo_read_prep            = osd_read_prep,
        .do_declare_punch         = osd_declare_punch,
        .do_punch                 = osd_punch,
        .dbo_fiemap_get           = osd_fiemap_get,
};