/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-ldiskfs/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

/* LUSTRE_VERSION_CODE */
#include <lustre_ver.h>
/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 * OBD_FAIL_CHECK
 */
#include <obd_support.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>

static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = 0;
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)

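/*
 * Illustrative note (an editor's sketch, not original source): the
 * reallocation above rounds the buffer size up to the next power of two,
 * starting from 256 pages (1MB of 4KB blocks).  Assuming 4KB pages and
 * PTLRPC_MAX_BRW_PAGES >= 1024, for example:
 *
 *      pages requested     dr_max_pages after realloc
 *      100                 256
 *      300                 512
 *      900                 1024
 *
 * so slowly growing requests trigger O(log n) reallocations rather than
 * one per request.
 */
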
static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;
}

void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.
                                 hist[BRW_R_DIO_FRAGS+rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
                                      iobuf->dr_elapsed);
        }
}

#ifndef REQ_WRITE /* pre-2.6.35 */
#define __REQ_WRITE BIO_RW
#endif

static void dio_complete_routine(struct bio *bio, int error)
{
        struct osd_iobuf *iobuf = bio->bi_private;
#ifdef HAVE_BVEC_ITER
        struct bvec_iter iter;
        struct bio_vec bvl;
#else
        int iter;
        struct bio_vec *bvl;
#endif

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to <https://jira.hpdd.intel.com/> , along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps).  Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
                       "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
                       "bi_private: %p\n", bio->bi_next, bio->bi_flags,
                        bio->bi_rw, bio->bi_vcnt, bio_idx(bio),
                        bio_sectors(bio) << 9, bio->bi_end_io,
                        atomic_read(&bio->bi_cnt), bio->bi_private);
                return;
        }

        /* the check is outside of the loop for performance reasons -bzzz */
        if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
                bio_for_each_segment(bvl, bio, iter) {
                        if (likely(error == 0))
                                SetPageUptodate(bvec_iter_page(&bvl, iter));
                        LASSERT(PageLocked(bvec_iter_page(&bvl, iter)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * set dr_elapsed before dr_numreqs drops to 0, otherwise the
         * service thread could see dr_numreqs already at zero while
         * dr_elapsed is not set yet, leading to lost data in this
         * processing and an assertion in a subsequent call to OSD.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
}

static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device    *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
}

static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}

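/*
 * Illustrative example (assuming a 4KB block size, so sector_bits = 3):
 * a bio whose last fragment ends at sector 80 can absorb a fragment for
 * file block 10, since (sector_t)10 << 3 == 80 == bio_end_sector(bio),
 * while a fragment for block 12 (sector 96) forces a new bio instead.
 */
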
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int            blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        struct page  **pages = iobuf->dr_pages;
        int            npages = iobuf->dr_npages;
        sector_t      *blocks = iobuf->dr_blocks;
        int            total_blocks = npages * blocks_per_page;
        int            sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int   blocksize = inode->i_sb->s_blocksize;
        struct bio    *bio = NULL;
        struct page   *page;
        unsigned int   page_offset;
        sector_t       sector;
        int            nblocks;
        int            block_idx;
        int            page_idx;
        int            i;
        int            rc = 0;
        ENTRY;

        LASSERT(iobuf->dr_npages == npages);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = cfs_time_current();

        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q =
                                        bdev_get_queue(bio->bi_bdev);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       0, queue_max_hw_segments(q));
                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio_set_sector(bio, sector);
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

out:
        /* in order to achieve better IO throughput, we don't wait for write
         * completion here. instead we proceed with the transaction commit in
         * parallel and wait for IO completion once the transaction is stopped.
         * see osd_trans_stop() for more details -bzzz */
        if (iobuf->dr_rw == 0) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}

static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb)
{
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_CACHE_SIZE - 1);
                int plen = PAGE_CACHE_SIZE - poff;

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(0);
}

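/*
 * Worked example (illustrative): with PAGE_CACHE_SIZE = 4096, a request
 * of offset = 4090, len = 100 is split into two descriptors:
 *
 *      lnb[0]: lnb_file_offset = 4090, lnb_page_offset = 4090, lnb_len = 6
 *      lnb[1]: lnb_file_offset = 4096, lnb_page_offset = 0,    lnb_len = 94
 *
 * and *nrpages = 2, i.e. one niobuf_local per page touched.
 */
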
static struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw)
{
        struct inode      *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page       *page;

        LASSERT(inode);

        page = find_or_create_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
                                   GFP_NOFS | __GFP_HIGHMEM);
        if (unlikely(page == NULL))
                lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);

        return page;
}

/*
 * the following locks are involved:
 * journal_start
 * i_mutex
 * page lock
 *
 * osd write path:
 *  - lock page(s)
 *  - journal_start
 *  - truncate_sem
 *
 * ext4 vmtruncate:
 *  - lock pages, unlock
 *  - journal_start
 *  - lock partial page
 *  - i_data_sem
 *
 */

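/*
 * Sketch of the resulting order on the osd write path (an illustration
 * derived from the comment above, not code from this file):
 *
 *      lock_page(page);                  (1) page lock first
 *      handle = journal_start(...);      (2) then the journal
 *      down(&truncate_sem);              (3) then truncate serialization
 *      ...
 *      up(&truncate_sem);
 *      journal_stop(handle);
 *      unlock_page(page);
 *
 * ext4's truncate path, by contrast, drops its page locks before calling
 * journal_start(), so the two orderings do not deadlock against each other.
 */
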
/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param lnb           array of pages undergoing IO
 * \param npages        number of pages in \a lnb
 *
 * \retval 0            always
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        int i;

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_page == NULL)
                        continue;
                LASSERT(PageLocked(lnb[i].lnb_page));
                unlock_page(lnb[i].lnb_page);
                page_cache_release(lnb[i].lnb_page);
                lu_object_put(env, &dt->do_lu);
                lnb[i].lnb_page = NULL;
        }

        RETURN(0);
}

/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for the same
 * region, but a partial-page truncate can leave dirty pages in the cache
 * for ldiskfs. It's possible the writeout on such a page is in progress
 * when we access it. It's also possible that during this writeout we put
 * new (partial) data into the page, but won't be able to proceed in
 * filter_commitrw_write(). Therefore, just wait for writeout completion
 * as it should be rare enough.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param pos           byte offset of IO start
 * \param len           number of bytes of IO
 * \param lnb           array of extents undergoing IO
 * \param rw            read or write operation?
 * \param capa          capabilities
 *
 * \retval pages        (zero or more) loaded successfully
 * \retval -ENOMEM      on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        int rw)
{
        struct osd_object   *obj    = osd_dt_obj(dt);
        int npages, i, rc = 0;

        LASSERT(obj->oo_inode);

        osd_map_remote_to_local(pos, len, &npages, lnb);

        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(dt, lnb->lnb_file_offset, rw);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                wait_on_page_writeback(lnb->lnb_page);
                BUG_ON(PageWriteback(lnb->lnb_page));

                lu_object_get(&dt->do_lu);
        }

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}

#ifndef HAVE_LDISKFS_MAP_BLOCKS

#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif

struct bpointers {
        sector_t *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

static long ldiskfs_ext_find_goal(struct inode *inode,
                                  struct ldiskfs_ext_path *path,
                                  unsigned long block, int *aflags)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ldiskfs_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ldiskfs_ext_pblock(ex) +
                                (block - le32_to_cpu(ex->ee_block));

                /* the index looks empty;
                 * try to find a goal starting from the index block itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}

static unsigned long new_blocks(handle_t *handle, struct inode *inode,
                                struct ldiskfs_ext_path *path,
                                unsigned long block, unsigned long *count,
                                int *err)
{
        struct ldiskfs_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = LDISKFS_MB_HINT_DATA;
        pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}

static int ldiskfs_ext_new_extent_cb(struct inode *inode,
                                     struct ldiskfs_ext_path *path,
                                     struct ldiskfs_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                     struct ldiskfs_extent *ex,
#endif
                                     void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct ldiskfs_extent nex;
        unsigned long pblock;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
        if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
#endif
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = LDISKFS_I(inode)->i_ext_generation;
        count = ldiskfs_ext_calc_credits_for_insert(inode, path);

        handle = osd_journal_start(inode, LDISKFS_HT_MISC,
                                   count + LDISKFS_ALLOC_NEEDED + 1);
        if (IS_ERR(handle)) {
                return PTR_ERR(handle);
        }

        if (tgen != LDISKFS_I(inode)->i_ext_generation) {
                /* the tree has changed, so the path can be invalid at the moment */
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback is not
         * protected by i_data_sem as a whole, so we patch it to store the
         * generation in the path, and verify here that the tree hasn't changed */
        down_write((&LDISKFS_I(inode)->i_data_sem));

        /* validate the extent, make sure the extent tree has not changed */
        if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&LDISKFS_I(inode)->i_data_sem);
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        BUG_ON(count > cex->ec_len);

        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ldiskfs_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
        if (err) {
                /* free data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it every free() */
                ldiskfs_discard_preallocations(inode);
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ldiskfs_free_blocks(handle, inode, NULL,
                                    ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#else
                ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * By storing the length of the extent we just inserted,
         * we ask ldiskfs_ext_walk_space() to continue
         * scanning after that block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ldiskfs_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&LDISKFS_I(inode)->i_data_sem));
        ldiskfs_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        CERROR("current extent: %u/%u/%llu %d\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start,
                                cex->ec_type);
#else
                        CERROR("current extent: %u/%u/%llu\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start);
#endif
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        if (cex->ec_type != LDISKFS_EXT_CACHE_EXTENT) {
#else
                        if ((cex->ec_len == 0) || (cex->ec_start == 0)) {
#endif
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
                                unmap_underlying_metadata(inode->i_sb->s_bdev,
                                                          *(bp->blocks));
                        }
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index,
                                   int clen, sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        struct bpointers bp;
        int err;

        if (index + clen >= inode->i_sb->s_maxbytes >> PAGE_SHIFT)
                return -EFBIG;

        bp.blocks = blocks;
        bp.start = index * blocks_per_page;
        bp.init_num = bp.num = clen * blocks_per_page;
        bp.create = create;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               bp.start, bp.start + bp.num - 1, (unsigned)inode->i_ino);

        err = ldiskfs_ext_walk_space(inode, bp.start, bp.num,
                                     ldiskfs_ext_new_extent_cb, &bp);
        ldiskfs_ext_invalidate_cache(inode);

        return err;
}

static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode,
                                          struct page **page, int pages,
                                          sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        pgoff_t bitmap_max_page_index;
        sector_t *b;
        int rc = 0, i;

        bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
                                PAGE_SHIFT;
        for (i = 0, b = blocks; i < pages; i++, page++) {
                if ((*page)->index + 1 >= bitmap_max_page_index) {
                        rc = -EFBIG;
                        break;
                }
                rc = ldiskfs_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %llu create %d: rc %d\n",
                               inode->i_ino,
                               (unsigned long long)*b, create, rc);
                        break;
                }
                b += blocks_per_page;
        }
        return rc;
}

static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode,
                                           struct page **page,
                                           int pages, sector_t *blocks,
                                           int create)
{
        int rc = 0, i = 0, clen = 0;
        struct page *fp = NULL;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* the pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += clen * (PAGE_CACHE_SIZE >> inode->i_blkbits);
        }

        if (fp)
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);

cleanup:
        return rc;
}

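/*
 * Illustrative example: pages with indices {3, 4, 5, 9} result in two
 * calls to osd_ldiskfs_map_nblocks() above: one from inside the loop for
 * the contiguous run starting at index 3 with clen = 3, and one from the
 * "if (fp)" tail after the loop for index 9 with clen = 1.
 */
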
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int rc;

        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);

        return rc;
}
#else
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* the pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                long blen, total = 0;
                handle_t *handle = NULL;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
                if (create) {
                        create = LDISKFS_GET_BLOCKS_CREATE;
                        handle = ldiskfs_journal_current_handle();
                        LASSERT(handle != NULL);
                }
cont_map:
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;
                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                } else {
                                        *(blocks + total) = map.m_pblk + c;
                                        /* unmap any possible underlying
                                         * metadata from the block device
                                         * mapping.  bug 6998. */
                                        if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                            create)
                                                unmap_underlying_metadata(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c);
                                }
                        }
                        rc = 0;
                }
                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        return rc;
}
#endif /* HAVE_LDISKFS_MAP_BLOCKS */

static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        struct timeval          start;
        struct timeval          end;
        unsigned long           timediff;
        ssize_t                 isize;
        __s64                   maxidx;
        int                     rc = 0;
        int                     i;
        int                     cache = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        do_gettimeofday(&start);
        for (i = 0; i < npages; i++) {

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);

                /*
                 * until commit, the content of the page is undefined;
                 * we'll set it uptodate once the bulk transfer is done.
                 * otherwise subsequent reads could access unstable data.
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_CACHE_SIZE)
                        continue;

                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_CACHE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        do_gettimeofday(&end);
        timediff = cfs_timeval_sub(&end, &start, NULL);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}

/* Check if a block is allocated or not */
static int osd_is_mapped(struct inode *inode, u64 offset)
{
        sector_t (*fs_bmap)(struct address_space *, sector_t);

        fs_bmap = inode->i_mapping->a_ops->bmap;

        /* We can't know if we are overwriting or not */
        if (unlikely(fs_bmap == NULL))
                return 0;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) <
            (offset >> inode->i_blkbits))
                return 0;

        if (fs_bmap(inode->i_mapping, offset >> inode->i_blkbits) == 0)
                return 0;

        return 1;
}

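/*
 * Usage sketch (hypothetical values): for a 4KB-block inode with
 * i_size = 8192, osd_is_mapped(inode, 0) consults ->bmap() for logical
 * block 0, while osd_is_mapped(inode, 16384) returns 0 immediately,
 * since block 4 lies beyond the last block covered by i_size.
 */
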
static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        int                      extents = 1;
        int                      depth;
        int                      i;
        int                      newblocks;
        int                      rc = 0;
        int                      flags = 0;
        int                      credits = 0;
        bool                     ignore_quota = false;
        long long                quota_space = 0;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        newblocks = npages;

        /* calculate number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                if (i && lnb[i].lnb_file_offset !=
                    lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
                        extents++;

                if (!osd_is_mapped(inode, lnb[i].lnb_file_offset))
                        quota_space += PAGE_CACHE_SIZE;

                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the checking
                 * for whether page is from cache can be simplified as:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        ignore_quota = true;
        }

        /*
         * each extent can go into a new leaf, causing a split
         * 5 is the max tree depth: inode + 4 index blocks
         * with blockmaps, depth is 3 at most
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * many concurrent threads may grow the tree by the time
                 * our transaction starts, so consider 2 the minimum depth
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * extents;
        }

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);

        /* each new block can go in different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                credits += newblocks;

        /* we can't dirty more gd blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                credits += newblocks;

        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* make sure the over quota flags were not set */
        lnb[0].lnb_flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA);

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   quota_space, oh, osd_dt_obj(dt), true,
                                   &flags, ignore_quota);

        /* for now we only need to store the overquota flags in the first
         * lnb; once we support multi-object BRW, this code will need to
         * be revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;

        RETURN(rc);
}

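/*
 * Worked example (illustrative, extent-mapped inode): npages = 256 in a
 * single contiguous run gives extents = 1.  With ext_depth(inode) == 1,
 * depth = max(1, 1) + 1 = 2 and newblocks = 256 + 2 = 258, so the write
 * itself declares credits = 1 (inode) + 2 * 2 * 1 (index blocks) = 5,
 * plus min(258, s_groups_count) bitmap blocks and min(258, s_gdb_count)
 * group descriptor blocks for the newly allocated space.
 */
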
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *thandle)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
        loff_t isize;
        int rc = 0, i;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 1, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        ll_vfs_dq_init(inode);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_rc == -ENOSPC &&
                    osd_is_mapped(inode, lnb[i].lnb_file_offset)) {
                        /* Allow the write to proceed if overwriting an
                         * existing block */
                        lnb[i].lnb_rc = 0;
                }

                if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
                               lnb[i].lnb_rc);
                        LASSERT(lnb[i].lnb_page);
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                        continue;
                }

                LASSERT(PageLocked(lnb[i].lnb_page));
                LASSERT(!PageWriteback(lnb[i].lnb_page));

                if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
                        isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;

                /*
                 * Since write and truncate are serialized by oo_sem, even
                 * partial-page truncate should not leave dirty pages in the
                 * page cache.
                 */
                LASSERT(!PageDirty(lnb[i].lnb_page));

                SetPageUptodate(lnb[i].lnb_page);

                osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
        }

        osd_trans_exec_op(env, thandle, OSD_OT_WRITE);

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 1);
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
        }

        if (likely(rc == 0)) {
                if (isize > i_size_read(inode)) {
                        i_size_write(inode, isize);
                        LDISKFS_I(inode)->i_disksize = isize;
                        ll_dirty_inode(inode, I_DIRTY_DATASYNC);
                }

                rc = osd_do_bio(osd, inode, iobuf);
                /* unlike the read path, we don't do stats here because
                 * the write is async; we'll do this in osd_bufs_put() */
        } else {
                osd_fini_iobuf(osd, iobuf);
        }

        osd_trans_exec_check(env, thandle, OSD_OT_WRITE);

        if (unlikely(rc != 0)) {
                /* if write fails, we should drop pages from the cache */
                for (i = 0; i < npages; i++) {
                        if (lnb[i].lnb_page == NULL)
                                continue;
                        LASSERT(PageLocked(lnb[i].lnb_page));
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                }
        }

        RETURN(rc);
}

static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                         struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct timeval start, end;
        unsigned long timediff;
        int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
        loff_t isize;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);

        if (osd->od_read_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        do_gettimeofday(&start);
        for (i = 0; i < npages; i++) {

                if (isize <= lnb[i].lnb_file_offset)
                        /* If there's no more data, abort early.
                         * lnb->lnb_rc == 0, so it's easy to detect later. */
                        break;

                if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len - 1)
                        lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
                else
                        lnb[i].lnb_rc = lnb[i].lnb_len;

                if (PageUptodate(lnb[i].lnb_page)) {
                        cache_hits++;
                } else {
                        cache_misses++;
                        osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
                }

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
        }
        do_gettimeofday(&end);
        timediff = cfs_timeval_sub(&end, &start, NULL);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (cache_hits != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
                                    cache_hits);
        if (cache_misses != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
                                    cache_misses);
        if (cache_hits + cache_misses != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
                                    cache_hits + cache_misses);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                rc = osd_do_bio(osd, inode, iobuf);

                /* IO stats will be done in osd_bufs_put() */
        }

        RETURN(rc);
}

/*
 * XXX: Another layering violation for now.
 *
 * We don't want to use ->f_op->read methods, because the generic file
 * write path
 *
 *         - serializes on ->i_sem, and
 *
 *         - does a lot of extra work like balance_dirty_pages(),
 *
 * which doesn't work for globally shared files like /last_rcvd.
 */
static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);

        memcpy(buffer, (char *)ei->i_data, buflen);

        return  buflen;
}

int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
        struct buffer_head *bh;
        unsigned long block;
        int osize;
        int blocksize;
        int csize;
        int boffs;
        int err;

1386         /* prevent reading after eof */
1387         spin_lock(&inode->i_lock);
1388         if (i_size_read(inode) < *offs + size) {
1389                 loff_t diff = i_size_read(inode) - *offs;
1390                 spin_unlock(&inode->i_lock);
1391                 if (diff < 0) {
1392                         CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
1393                                i_size_read(inode), *offs);
1394                         return -EBADR;
1395                 } else if (diff == 0) {
1396                         return 0;
1397                 } else {
1398                         size = diff;
1399                 }
1400         } else {
1401                 spin_unlock(&inode->i_lock);
1402         }
1403
1404         blocksize = 1 << inode->i_blkbits;
1405         osize = size;
1406         while (size > 0) {
1407                 block = *offs >> inode->i_blkbits;
1408                 boffs = *offs & (blocksize - 1);
1409                 csize = min(blocksize - boffs, size);
1410                 bh = ldiskfs_bread(NULL, inode, block, 0, &err);
1411                 if (err != 0) {
1412                         CERROR("%s: can't read %u@%llu on ino %lu: rc = %d\n",
1413                                LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
1414                                csize, *offs, inode->i_ino, err);
1415                         if (bh != NULL)
1416                                 brelse(bh);
1417                         return err;
1418                 }
1419
1420                 if (bh != NULL) {
1421                         memcpy(buf, bh->b_data + boffs, csize);
1422                         brelse(bh);
1423                 } else {
1424                         memset(buf, 0, csize);
1425                 }
1426
1427                 *offs += csize;
1428                 buf += csize;
1429                 size -= csize;
1430         }
1431         return osize;
1432 }
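
/*
 * Example of how the loop above splits a cross-block read (illustrative
 * values, assuming a 4KB block size): for *offs = 4090 and size = 100,
 * the first pass reads block 0 with boffs = 4090 and
 * csize = min(4096 - 4090, 100) = 6, and the second pass reads block 1
 * with boffs = 0 and csize = 94. A hole (NULL bh with err == 0) is
 * returned as zeroes by the memset() branch.
 */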
1433
1434 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1435                         struct lu_buf *buf, loff_t *pos)
1436 {
1437         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1438         int           rc;
1439
1440         /* Read small symlink from inode body as we need to maintain correct
1441          * on-disk symlinks for ldiskfs.
1442          */
1443         if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1444             (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1445                 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1446         else
1447                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1448
1449         return rc;
1450 }
1451
1452 static inline int osd_extents_enabled(struct super_block *sb,
1453                                       struct inode *inode)
1454 {
1455         if (inode != NULL) {
1456                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1457                         return 1;
1458         } else if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
1459                                 LDISKFS_FEATURE_INCOMPAT_EXTENTS)) {
1460                 return 1;
1461         }
1462         return 0;
1463 }
1464
1465 static inline int osd_calc_bkmap_credits(struct super_block *sb,
1466                                          struct inode *inode,
1467                                          const loff_t size,
1468                                          const loff_t pos,
1469                                          const int blocks)
1470 {
1471         int credits, bits, bs, i;
1472
1473         bits = sb->s_blocksize_bits;
1474         bs = 1 << bits;
1475
1476         /* legacy block map: 3 levels * 3 (bitmap, gd, the block itself);
1477          * we do not expect block maps on large files,
1478          * so shrink it to 2 levels (enough for 4GB files) */
1479
1480         /* this is default reservation: 2 levels */
1481         credits = (blocks + 2) * 3;
1482
1483         /* actual offset is unknown, hard to optimize */
1484         if (pos == -1)
1485                 return credits;
1486
1487         /* now check a few specific cases to optimize */
1488         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1489                 /* no indirects */
1490                 credits = blocks;
1491                 /* new object: all blocks allocated, bitmap + gd each */
1492                 if (inode == NULL) {
1493                         credits += blocks * 2;
1494                         return credits;
1495                 }
1496                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1497                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1498                         if (LDISKFS_I(inode)->i_data[i] == 0)
1499                                 credits += 2;
1500                 }
1501         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1502                 /* single indirect */
1503                 credits = blocks * 3;
1504                 /* reserve for the indirect block if not allocated yet */
1505                 if (!inode || LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1506                         credits += 3;
1507         }
1508
1509         return credits;
1510 }
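
/*
 * Worked examples for the credit math above (illustrative values,
 * assuming bs = 4096, i.e. bits = 12):
 *   - unknown offset (pos == -1), blocks = 2: the default reservation
 *     is (2 + 2) * 3 = 12 credits;
 *   - pos = 0, size = 100, inode exists and i_data[0] is mapped: the
 *     no-indirect case gives credits = blocks = 1, with no extra 2
 *     added since the block is already allocated;
 *   - the same write on a not-yet-created object (inode == NULL):
 *     1 + 1 * 2 = 3 credits, covering bitmap and group descriptor
 *     updates for the newly allocated block.
 */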
1511
1512 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1513                                  const struct lu_buf *buf, loff_t _pos,
1514                                  struct thandle *handle)
1515 {
1516         struct osd_object  *obj  = osd_dt_obj(dt);
1517         struct inode       *inode = obj->oo_inode;
1518         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1519         struct osd_thandle *oh;
1520         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1521         int                 bits, bs;
1522         int                 depth, size;
1523         loff_t              pos;
1524         ENTRY;
1525
1526         LASSERT(buf != NULL);
1527         LASSERT(handle != NULL);
1528
1529         oh = container_of0(handle, struct osd_thandle, ot_super);
1530         LASSERT(oh->ot_handle == NULL);
1531
1532         size = buf->lb_len;
1533         bits = sb->s_blocksize_bits;
1534         bs = 1 << bits;
1535
1536         if (_pos == -1) {
1537                 /* this is an append: the offset is unknown, so
1538                  * expect the worst case of a cross-block record */
1539                 pos = 0;
1540         } else {
1541                 pos = _pos;
1542         }
1543
1544         /* blocks to modify */
1545         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1546         LASSERT(blocks > 0);
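        /* e.g. pos = 4090, size = 100, bs = 4096 (illustrative):
         * blocks = ((4190 + 4095) >> 12) - (4090 >> 12) = 2 - 0 = 2 */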
1547
1548         if (inode != NULL && _pos != -1) {
1549                 /* object size in blocks */
1550                 est = (i_size_read(inode) + bs - 1) >> bits;
1551                 allocated = inode->i_blocks >> (bits - 9);
1552                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1553                         /* looks like an overwrite, no need to modify tree */
1554                         credits = blocks;
1555                         /* no need to modify i_size */
1556                         goto out;
1557                 }
1558         }
1559
1560         if (osd_extents_enabled(sb, inode)) {
1561                 /*
1562                  * Many concurrent threads may grow the tree by the time
1563                  * our transaction starts, so consider 2 the minimum depth.
1564                  * At every level we may need to allocate a new block and
1565                  * take some entries from the old one: 3 blocks to allocate
1566                  * (bitmap, gd, the block itself) + the old block = 4 per
1567                  * level.
1568                  */
1569                 depth = inode != NULL ? ext_depth(inode) : 0;
1570                 depth = max(depth, 1) + 1;
1571                 credits = depth;
1572                 /* if not append, then split may need to modify
1573                  * existing blocks moving entries into the new ones */
1574                 if (_pos != -1)
1575                         credits += depth;
1576                 /* blocks to store data: bitmap,gd,itself */
1577                 credits += blocks * 3;
1578         } else {
1579                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1580         }
1581         /* the inode also needs an update, unless it is created as
1582          * part of this transaction and counted by the creation method */
1583         if (inode != NULL)
1584                 credits++;
1585
1586 out:
1587
1588         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1589
1590         /* dt_declare_write() is usually called for system objects, such
1591          * as llog or last_rcvd files. We needn't enforce quota on those
1592          * objects, so always set the lqi_space as 0. */
1593         if (inode != NULL)
1594                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1595                                            i_gid_read(inode), 0, oh, obj, true,
1596                                            NULL, false);
1597         RETURN(rc);
1598 }
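
/*
 * An illustrative credit estimate for the extents branch above: for an
 * existing inode with ext_depth() == 1 and a 2-block write,
 * depth = max(1, 1) + 1 = 2, so credits = 2, plus another depth (2)
 * when the _pos split check applies, plus 2 * 3 = 6 for the data
 * blocks (bitmap, gd, the block itself), plus 1 for the inode update:
 * 11 credits declared via osd_trans_declare_op().
 */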
1599
1600 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1601 {
1602         /* LU-2634: clear the extent format for fast symlink */
1603         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1604
1605         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1606         LDISKFS_I(inode)->i_disksize = buflen;
1607         i_size_write(inode, buflen);
1608         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1609
1610         return 0;
1611 }
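
/*
 * Note: this stores a "fast" symlink target directly in the inode
 * body -- i_data is the 60-byte area that otherwise holds block
 * pointers, which is why the extents flag must be cleared first
 * (LU-2634). A short target such as "../target" fits here; longer
 * targets go through osd_ldiskfs_write_record() as regular data.
 */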
1612
1613 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
1614                              int write_NUL, loff_t *offs, handle_t *handle)
1615 {
1616         struct buffer_head *bh        = NULL;
1617         loff_t              offset    = *offs;
1618         loff_t              new_size  = i_size_read(inode);
1619         unsigned long       block;
1620         int                 blocksize = 1 << inode->i_blkbits;
1621         int                 err = 0;
1622         int                 size;
1623         int                 boffs;
1624         int                 dirty_inode = 0;
1625
1626         if (write_NUL) {
1627                 /*
1628                  * a long symlink does not count the NUL terminator in
1629                  * bufsize, so write it out explicitly; the inode's file
1630                  * size does not include the terminator either.
1631                  */
1632                 ((char *)buf)[bufsize] = '\0';
1633                 ++bufsize;
1634         }
1635         while (bufsize > 0) {
1636                 if (bh != NULL)
1637                         brelse(bh);
1638
1639                 block = offset >> inode->i_blkbits;
1640                 boffs = offset & (blocksize - 1);
1641                 size = min(blocksize - boffs, bufsize);
1642                 bh = ldiskfs_bread(handle, inode, block, 1, &err);
1643                 if (!bh) {
1644                         CERROR("%s: error reading offset %llu (block %lu): "
1645                                "rc = %d\n",
1646                                inode->i_sb->s_id, offset, block, err);
1647                         break;
1648                 }
1649
1650                 err = ldiskfs_journal_get_write_access(handle, bh);
1651                 if (err) {
1652                         CERROR("journal_get_write_access() returned error %d\n",
1653                                err);
1654                         break;
1655                 }
1656                 LASSERTF(boffs + size <= bh->b_size,
1657                          "boffs %d size %d bh->b_size %lu\n",
1658                          boffs, size, (unsigned long)bh->b_size);
1659                 memcpy(bh->b_data + boffs, buf, size);
1660                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
1661                 if (err)
1662                         break;
1663
1664                 if (offset + size > new_size)
1665                         new_size = offset + size;
1666                 offset += size;
1667                 bufsize -= size;
1668                 buf += size;
1669         }
1670         if (bh)
1671                 brelse(bh);
1672
1673         if (write_NUL)
1674                 --new_size;
1675         /* correct in-core and on-disk sizes */
1676         if (new_size > i_size_read(inode)) {
1677                 spin_lock(&inode->i_lock);
1678                 if (new_size > i_size_read(inode))
1679                         i_size_write(inode, new_size);
1680                 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
1681                         LDISKFS_I(inode)->i_disksize = i_size_read(inode);
1682                         dirty_inode = 1;
1683                 }
1684                 spin_unlock(&inode->i_lock);
1685                 if (dirty_inode)
1686                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1687         }
1688
1689         if (err == 0)
1690                 *offs = offset;
1691         return err;
1692 }
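
/*
 * The write_NUL accounting above, with illustrative numbers: for a
 * long symlink target "abc" the caller passes bufsize = 3; 4 bytes
 * including the appended '\0' are written to disk, and new_size is
 * then decremented back to 3, so i_size reports the target length
 * without the terminator.
 */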
1693
1694 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1695                          const struct lu_buf *buf, loff_t *pos,
1696                          struct thandle *handle, int ignore_quota)
1697 {
1698         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1699         struct osd_thandle      *oh;
1700         ssize_t                 result;
1701         int                     is_link;
1702
1703         LASSERT(dt_object_exists(dt));
1704
1705         LASSERT(handle != NULL);
1706         LASSERT(inode != NULL);
1707         ll_vfs_dq_init(inode);
1708
1709         /* XXX: one declared chunk can be used many times, so the
1710          * actual credit check is deferred to osd_trans_exec_check() */
1711
1712         oh = container_of(handle, struct osd_thandle, ot_super);
1713         LASSERT(oh->ot_handle->h_transaction != NULL);
1714         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
1715
1716         /* Write small symlink to inode body as we need to maintain correct
1717          * on-disk symlinks for ldiskfs.
1718          * Note: buf->lb_buf contains a NUL terminator while buf->lb_len
1719          * does not include it.
1720          */
1721         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1722         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1723                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1724         else
1725                 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
1726                                                   buf->lb_len, is_link, pos,
1727                                                   oh->ot_handle);
1728         if (result == 0)
1729                 result = buf->lb_len;
1730
1731         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
1732
1733         return result;
1734 }
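
/*
 * Note: sizeof(LDISKFS_I(inode)->i_data) is 60 bytes (15 32-bit block
 * pointers), so a symlink target whose lb_len (which excludes the NUL)
 * is under 60 bytes takes the osd_ldiskfs_writelink() fast path above;
 * longer targets are written as regular data, with the trailing NUL
 * appended by osd_ldiskfs_write_record() (is_link doubles as its
 * write_NUL argument).
 */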
1735
1736 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1737                              __u64 start, __u64 end, struct thandle *th)
1738 {
1739         struct osd_thandle *oh;
1740         struct inode       *inode;
1741         int                 rc;
1742         ENTRY;
1743
1744         LASSERT(th);
1745         oh = container_of(th, struct osd_thandle, ot_super);
1746
1747         /*
1748          * We don't need to reserve credits for the whole truncate:
1749          * that is not possible, as the truncate may need to free too
1750          * many blocks to fit into a single transaction. Instead we
1751          * reserve credits to change i_size and put the inode onto the
1752          * orphan list; if needed, the truncate will extend or restart
1753          * the transaction.
1754          */
1755         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1756                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1757
1758         inode = osd_dt_obj(dt)->oo_inode;
1759         LASSERT(inode);
1760
1761         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1762                                    0, oh, osd_dt_obj(dt), true, NULL, false);
1763         RETURN(rc);
1764 }
1765
1766 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1767                      __u64 start, __u64 end, struct thandle *th)
1768 {
1769         struct osd_thandle *oh;
1770         struct osd_object  *obj = osd_dt_obj(dt);
1771         struct inode       *inode = obj->oo_inode;
1772         handle_t           *h;
1773         tid_t               tid;
1774         int                rc = 0, rc2 = 0;
1775         ENTRY;
1776
1777         LASSERT(end == OBD_OBJECT_EOF);
1778         LASSERT(dt_object_exists(dt));
1779         LASSERT(osd_invariant(obj));
1780         LASSERT(inode != NULL);
1781         ll_vfs_dq_init(inode);
1782
1783         LASSERT(th);
1784         oh = container_of(th, struct osd_thandle, ot_super);
1785         LASSERT(oh->ot_handle->h_transaction != NULL);
1786
1787         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
1788
1789         tid = oh->ot_handle->h_transaction->t_tid;
1790
1791         i_size_write(inode, start);
1792         ll_truncate_pagecache(inode, start);
1793 #ifdef HAVE_INODEOPS_TRUNCATE
1794         if (inode->i_op->truncate) {
1795                 inode->i_op->truncate(inode);
1796         } else
1797 #endif
1798                 ldiskfs_truncate(inode);
1799
1800         /*
1801          * For a partial-page truncate, flush the page to disk immediately to
1802          * avoid data corruption during direct disk write.  b=17397
1803          */
1804         if ((start & ~PAGE_MASK) != 0)
1805                 rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
1806
1807         h = journal_current_handle();
1808         LASSERT(h != NULL);
1809         LASSERT(h == oh->ot_handle);
1810
1811         /* do not check credits with osd_trans_exec_check() as the truncate
1812          * can restart the transaction internally, and we restart it
1813          * ourselves below in that case */
1814
1815         if (tid != h->h_transaction->t_tid) {
1816                 int credits = oh->ot_credits;
1817                 /*
1818                  * transaction has changed during truncate
1819                  * we need to restart the handle with our credits
1820                  */
1821                 if (h->h_buffer_credits < credits) {
1822                         if (ldiskfs_journal_extend(h, credits))
1823                                 rc2 = ldiskfs_journal_restart(h, credits);
1824                 }
1825         }
1826
1827         RETURN(rc == 0 ? rc2 : rc);
1828 }
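
/*
 * On the restart logic above: ldiskfs_truncate() may internally commit
 * and restart the running handle, which changes h_transaction->t_tid.
 * When that is detected, ldiskfs_journal_extend() first tries to add
 * the originally declared ot_credits to the current transaction, and
 * only if it cannot (non-zero return) is the handle moved to a fresh
 * transaction via ldiskfs_journal_restart().
 */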
1829
1830 static int fiemap_check_ranges(struct inode *inode,
1831                                u64 start, u64 len, u64 *new_len)
1832 {
1833         loff_t maxbytes;
1834
1835         *new_len = len;
1836
1837         if (len == 0)
1838                 return -EINVAL;
1839
1840         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
1841                 maxbytes = inode->i_sb->s_maxbytes;
1842         else
1843                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
1844
1845         if (start > maxbytes)
1846                 return -EFBIG;
1847
1848         /*
1849          * Shrink request scope to what the fs can actually handle.
1850          */
1851         if (len > maxbytes || (maxbytes - len) < start)
1852                 *new_len = maxbytes - start;
1853
1854         return 0;
1855 }
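
/*
 * An illustrative clamp: with maxbytes = 2^42, start = 2^41 and
 * len = 2^42, the (maxbytes - len) < start test fires (0 < 2^41), so
 * *new_len becomes maxbytes - start = 2^41 and the request ends
 * exactly at the filesystem's limit.
 */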
1856
1857 /* So that the fiemap access checks can't overflow on 32 bit machines. */
1858 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
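/*
 * Note: struct fiemap_extent is 56 bytes on Linux, so this caps
 * fm_extent_count at roughly 76 million extents and guarantees
 * fm_extent_count * sizeof(struct fiemap_extent) fits in 32 bits.
 */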
1859
1860 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
1861                           struct fiemap *fm)
1862 {
1863         struct fiemap_extent_info fieinfo = {0, };
1864         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1865         u64 len;
1866         int rc;
1867
1868
1869         LASSERT(inode);
1870         if (inode->i_op->fiemap == NULL)
1871                 return -EOPNOTSUPP;
1872
1873         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
1874                 return -EINVAL;
1875
1876         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
1877         if (rc)
1878                 return rc;
1879
1880         fieinfo.fi_flags = fm->fm_flags;
1881         fieinfo.fi_extents_max = fm->fm_extent_count;
1882         fieinfo.fi_extents_start = fm->fm_extents;
1883
1884         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
1885                 filemap_write_and_wait(inode->i_mapping);
1886
1887         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
1888         fm->fm_flags = fieinfo.fi_flags;
1889         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
1890
1891         return rc;
1892 }
1893
1894 /*
1895  * in some cases we may need the declare methods for objects being created,
1896  * e.g., when we create a symlink
1897  */
1898 const struct dt_body_operations osd_body_ops_new = {
1899         .dbo_declare_write = osd_declare_write,
1900 };
1901
1902 const struct dt_body_operations osd_body_ops = {
1903         .dbo_read                 = osd_read,
1904         .dbo_declare_write        = osd_declare_write,
1905         .dbo_write                = osd_write,
1906         .dbo_bufs_get             = osd_bufs_get,
1907         .dbo_bufs_put             = osd_bufs_put,
1908         .dbo_write_prep           = osd_write_prep,
1909         .dbo_declare_write_commit = osd_declare_write_commit,
1910         .dbo_write_commit         = osd_write_commit,
1911         .dbo_read_prep            = osd_read_prep,
1912         .dbo_declare_punch        = osd_declare_punch,
1913         .dbo_punch                = osd_punch,
1914         .dbo_fiemap_get           = osd_fiemap_get,
1915 };
1916