/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-ldiskfs/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 * OBD_FAIL_CHECK
 */
#include <obd_support.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>

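/**
 * Prepare a struct osd_iobuf for a new bulk IO.
 *
 * Resets the per-request state and, if the preallocated page/block arrays
 * are too small for \a pages, grows them in powers of two (starting from
 * 256 pages, i.e. 1MB worth of 4K blocks) up to PTLRPC_MAX_BRW_PAGES.
 *
 * \param d     OSD device the IO will go to
 * \param iobuf IO buffer to initialize
 * \param rw    0 for read, 1 for write
 * \param line  caller's line number, recorded for debugging
 * \param pages number of pages in the upcoming IO
 *
 * \retval 0        on success
 * \retval -ENOMEM  if the arrays cannot be (re)allocated
 */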
static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = 0;
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)

static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;
}

void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS + rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME + rw],
                                      iobuf->dr_elapsed);
        }
}

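/*
 * Kernels that define HAVE_BIO_ENDIO_USES_ONE_ARG dropped the "error"
 * argument from the bi_end_io callback and report the completion status
 * via the bio itself instead, so the routine needs two prototypes.
 */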
#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
        int error = bio->bi_error;
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
        struct osd_iobuf *iobuf = bio->bi_private;
        int iter;
        struct bio_vec *bvl;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to <https://jira.hpdd.intel.com/>, along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps).  Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, "
#ifdef HAVE_BI_RW
                       "bi_rw: %lu, "
#else
                       "bi_opf: %u, "
#endif
                       "bi_vcnt: %d, bi_idx: %d, bi_size: %d, bi_end_io: %p, "
                       "bi_cnt: %d, bi_private: %p\n", bio->bi_next,
                        (unsigned long)bio->bi_flags,
#ifdef HAVE_BI_RW
                        bio->bi_rw,
#else
                        bio->bi_opf,
#endif
                        bio->bi_vcnt, bio_idx(bio),
                        bio_sectors(bio) << 9, bio->bi_end_io,
#ifdef HAVE_BI_CNT
                        atomic_read(&bio->bi_cnt),
#else
                        atomic_read(&bio->__bi_cnt),
#endif
                        bio->bi_private);
                return;
        }

        /* the check is outside of the cycle for performance reasons -bzzz */
        if (!bio_data_dir(bio)) {
                bio_for_each_segment_all(bvl, bio, iter) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl_to_page(bvl));
                        LASSERT(PageLocked(bvl_to_page(bvl)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * Set dr_elapsed before dr_numreqs drops to 0, otherwise the
         * service thread could see dr_numreqs == 0 while dr_elapsed is
         * not set yet, losing the timing data for this IO and tripping
         * an assertion in a subsequent call into the OSD.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
}

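/**
 * Record the start of one bio fragment for brw stats.
 *
 * Bumps the fragment count and per-device in-flight counter, and tallies
 * the in-flight depth and disk IO size histograms for the direction.
 */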
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device    *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

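/*
 * Submit a bio for either READ or WRITE.  Older kernels take the
 * direction as a separate submit_bio() argument (HAVE_SUBMIT_BIO_2ARGS);
 * newer ones encode the operation in bio->bi_opf.
 */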
static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
#else
        bio->bi_opf |= rw;
        submit_bio(bio);
#endif
}

static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}

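/**
 * Map the pages of an iobuf onto bios and submit them.
 *
 * Walks the per-page block numbers filled in by the mapping code,
 * zero-fills holes on reads, merges physically contiguous blocks into
 * as few bios as possible, and submits them under a blk plug.  Writes
 * are left in flight (their completion is waited for in osd_trans_stop());
 * reads are waited for here before returning.
 *
 * \retval 0        on success
 * \retval -ENOMEM  if a bio cannot be allocated
 * \retval negative iobuf->dr_error from completed requests otherwise
 */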
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int            blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page  **pages = iobuf->dr_pages;
        int            npages = iobuf->dr_npages;
        sector_t      *blocks = iobuf->dr_blocks;
        int            total_blocks = npages * blocks_per_page;
        int            sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int   blocksize = inode->i_sb->s_blocksize;
        struct bio    *bio = NULL;
        struct page   *page;
        unsigned int   page_offset;
        sector_t       sector;
        int            nblocks;
        int            block_idx;
        int            page_idx;
        int            i;
        int            rc = 0;
        DECLARE_PLUG(plug);
        ENTRY;

        LASSERT(iobuf->dr_npages == npages);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = cfs_time_current();

        blk_start_plug(&plug);
        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q =
                                        bdev_get_queue(bio->bi_bdev);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       0, queue_max_hw_segments(q));
                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio_set_sector(bio, sector);
#ifdef HAVE_BI_RW
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
#else
                        bio->bi_opf = (iobuf->dr_rw == 0) ? READ : WRITE;
#endif
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

out:
        blk_finish_plug(&plug);

        /* To achieve better IO throughput, we don't wait for write
         * completion here: we proceed with the transaction commit in
         * parallel and wait for IO completion once the transaction is
         * stopped.  See osd_trans_stop() for more details. -bzzz */
        if (iobuf->dr_rw == 0) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}

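/**
 * Split a remote IO extent into per-page niobuf_local entries.
 *
 * Fills one lnb entry per page covered by [offset, offset + len),
 * recording the file offset, in-page offset and length of each chunk,
 * and returns the number of entries via \a nrpages.
 */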
static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb)
{
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_SIZE - 1);
                int plen = PAGE_SIZE - poff;

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(0);
}

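/**
 * Look up or allocate a page cache page for the given file offset,
 * returning it locked.  A failed allocation is accounted in the
 * LPROC_OSD_NO_PAGE counter.
 */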
static struct page *osd_get_page(struct dt_object *dt, loff_t offset,
                                 gfp_t gfp_mask)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page *page;

        LASSERT(inode);

        page = find_or_create_page(inode->i_mapping, offset >> PAGE_SHIFT,
                                   gfp_mask);

        if (unlikely(page == NULL))
                lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);

        return page;
}

/*
 * There are the following "locks":
 * journal_start
 * i_mutex
 * page lock
 *
 * osd write path:
 *  - lock page(s)
 *  - journal_start
 *  - truncate_sem
 *
 * ext4 vmtruncate:
 *  - lock pages, unlock
 *  - journal_start
 *  - lock partial page
 *  - i_data_sem
 *
 */

/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param lnb           array of pages undergoing IO
 * \param npages        number of pages in \a lnb
 *
 * \retval 0            always
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        int i;

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_page == NULL)
                        continue;
                LASSERT(PageLocked(lnb[i].lnb_page));
                unlock_page(lnb[i].lnb_page);
                put_page(lnb[i].lnb_page);
                dt_object_put(env, dt);
                lnb[i].lnb_page = NULL;
        }

        RETURN(0);
}

/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for the same
 * region, but partial-page truncate can leave dirty pages in the cache for
 * ldiskfs.  It's possible the writeout on such a page is in progress when
 * we access it.  It's also possible that during this writeout we put new
 * (partial) data into the page, but won't be able to proceed in
 * filter_commitrw_write().  Therefore, just wait for writeout completion
 * as it should be rare enough.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param pos           byte offset of IO start
 * \param len           number of bytes of IO
 * \param lnb           array of extents undergoing IO
 * \param rw            read or write operation, and other flags
 *
 * \retval pages        (zero or more) loaded successfully
 * \retval -ENOMEM      on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        enum dt_bufs_type rw)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int npages, i, rc = 0;
        gfp_t gfp_mask;

        LASSERT(obj->oo_inode);

        osd_map_remote_to_local(pos, len, &npages, lnb);

        /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
        gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
                                             GFP_HIGHUSER;
        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(dt, lnb->lnb_file_offset,
                                             gfp_mask);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                wait_on_page_writeback(lnb->lnb_page);
                BUG_ON(PageWriteback(lnb->lnb_page));

                lu_object_get(&dt->do_lu);
        }

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}

#ifndef HAVE_LDISKFS_MAP_BLOCKS

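/*
 * Compatibility path for ldiskfs without ldiskfs_map_blocks(): blocks
 * are mapped/allocated by walking the extent tree (or block maps)
 * directly via ldiskfs_ext_walk_space() with the callback below.
 */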
#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif

struct bpointers {
        sector_t *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

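/*
 * Pick an allocation goal for a new block: right after the extent (or
 * index block) preceding it in the tree if one exists, otherwise spread
 * allocations within the inode's block group based on the caller's pid.
 */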
static long ldiskfs_ext_find_goal(struct inode *inode,
                                  struct ldiskfs_ext_path *path,
                                  unsigned long block, int *aflags)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ldiskfs_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ldiskfs_ext_pblock(ex) +
                                (block - le32_to_cpu(ex->ee_block));

                /* it looks like the index is empty;
                 * try to find a goal starting from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}

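/*
 * Allocate up to *count new blocks near the goal computed above, using
 * the neighbouring allocated blocks (left/right) as mballoc hints.
 */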
static unsigned long new_blocks(handle_t *handle, struct inode *inode,
                                struct ldiskfs_ext_path *path,
                                unsigned long block, unsigned long *count,
                                int *err)
{
        struct ldiskfs_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = LDISKFS_MB_HINT_DATA;
        pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}

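/*
 * Callback for ldiskfs_ext_walk_space().  For an already-cached extent
 * it just maps the blocks; for a hole it either reports unmapped blocks
 * (create == 0) or starts a journal handle and inserts a newly allocated
 * extent, retrying with EXT_REPEAT if the tree changed under us.
 */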
static int ldiskfs_ext_new_extent_cb(struct inode *inode,
                                     struct ldiskfs_ext_path *path,
                                     struct ldiskfs_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                     struct ldiskfs_extent *ex,
#endif
                                     void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct ldiskfs_extent nex;
        unsigned long pblock = 0;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
        if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
#endif
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = LDISKFS_I(inode)->i_ext_generation;
        count = ldiskfs_ext_calc_credits_for_insert(inode, path);

        handle = osd_journal_start(inode, LDISKFS_HT_MISC,
                                   count + LDISKFS_ALLOC_NEEDED + 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        if (tgen != LDISKFS_I(inode)->i_ext_generation) {
                /* the tree has changed, so the path can be invalid now */
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback is not
         * protected by i_data_sem as a whole, so we patch it to store the
         * generation in the path and verify here that the tree hasn't
         * changed */
        down_write((&LDISKFS_I(inode)->i_data_sem));

        /* validate extent, make sure the extent tree has not changed */
        if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&LDISKFS_I(inode)->i_data_sem);
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        BUG_ON(count > cex->ec_len);

        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ldiskfs_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
        if (err) {
                /* free data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it every free() */
                ldiskfs_discard_preallocations(inode);
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ldiskfs_free_blocks(handle, inode, NULL,
                                    ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#else
                ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * Putting the length of the extent we actually inserted asks
         * ldiskfs_ext_walk_space() to continue scanning after that block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ldiskfs_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&LDISKFS_I(inode)->i_data_sem));
        ldiskfs_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        CERROR("current extent: %u/%u/%llu %d\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start,
                                cex->ec_type);
#else
                        CERROR("current extent: %u/%u/%llu\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start);
#endif
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (pblock != 0) {
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                unmap_underlying_metadata(inode->i_sb->s_bdev,
                                                          *(bp->blocks));
#else
                                clean_bdev_aliases(inode->i_sb->s_bdev,
                                                   *(bp->blocks), 1);
#endif
                        }
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

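/*
 * Map/allocate blocks for up to \a clen pages starting at page index
 * \a index, rejecting requests beyond s_maxbytes with -EFBIG.
 */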
static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index,
                                   int clen, sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct bpointers bp;
        int err;

        if (index + clen >= inode->i_sb->s_maxbytes >> PAGE_SHIFT)
                return -EFBIG;

        bp.blocks = blocks;
        bp.start = index * blocks_per_page;
        bp.init_num = bp.num = clen * blocks_per_page;
        bp.create = create;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               bp.start, bp.start + bp.num - 1, (unsigned)inode->i_ino);

        err = ldiskfs_ext_walk_space(inode, bp.start, bp.num,
                                     ldiskfs_ext_new_extent_cb, &bp);
        ldiskfs_ext_invalidate_cache(inode);

        return err;
}

static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode,
                                          struct page **page, int pages,
                                          sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        pgoff_t bitmap_max_page_index;
        sector_t *b;
        int rc = 0, i;

        bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
                                PAGE_SHIFT;
        for (i = 0, b = blocks; i < pages; i++, page++) {
                if ((*page)->index + 1 >= bitmap_max_page_index) {
                        rc = -EFBIG;
                        break;
                }
                rc = ldiskfs_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %llu create %d: rc %d\n",
                               inode->i_ino,
                               (unsigned long long)*b, create, rc);
                        break;
                }
                b += blocks_per_page;
        }
        return rc;
}

static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode,
                                           struct page **page,
                                           int pages, sector_t *blocks,
                                           int create)
{
        int rc = 0, i = 0, clen = 0;
        struct page *fp = NULL;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += clen * (PAGE_SIZE >> inode->i_blkbits);
        }

        if (fp)
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);

cleanup:
        return rc;
}

static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int rc;

        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);

        return rc;
}
#else
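/*
 * Modern path: map/allocate blocks for the sorted pages with
 * ldiskfs_map_blocks() inside the caller's journal handle, one
 * contiguous page run at a time.
 */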
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;
        handle_t *handle = NULL;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        if (create) {
                create = LDISKFS_GET_BLOCKS_CREATE;
                handle = ldiskfs_journal_current_handle();
                LASSERT(handle != NULL);
                rc = osd_attach_jinode(inode);
                if (rc)
                        return rc;
        }
        /* pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                long blen, total = 0;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
cont_map:
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;
                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                } else {
                                        *(blocks + total) = map.m_pblk + c;
                                        /* unmap any possible underlying
                                         * metadata from the block device
                                         * mapping.  bug 6998. */
                                        if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                            create)
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                                unmap_underlying_metadata(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c);
#else
                                                clean_bdev_aliases(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c, 1);
#endif
                                }
                        }
                        rc = 0;
                }
                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        return rc;
}
#endif /* HAVE_LDISKFS_MAP_BLOCKS */

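/**
 * Prepare a write: for partial pages below EOF, read the existing disk
 * blocks in (via osd_do_bio()); for partial pages beyond EOF, zero-fill
 * the parts that will not be overwritten by the bulk.  All pages are
 * marked not uptodate until the data is committed.
 */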
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        ktime_t start;
        ktime_t end;
        s64 timediff;
        ssize_t                 isize;
        __s64                   maxidx;
        int                     rc = 0;
        int                     i;
        int                     cache = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        start = ktime_get();
        for (i = 0; i < npages; i++) {

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);

                /*
                 * Till commit the content of the page is undefined; we
                 * will set it uptodate once the bulk is done.  Otherwise
                 * subsequent reads could access non-stable data.
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_SIZE)
                        continue;

                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}

struct osd_fextent {
        sector_t        start;
        sector_t        end;
        unsigned int    mapped:1;
};

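/*
 * Check whether the block backing \a offset is allocated, using a
 * single-extent FIEMAP query and caching the answer in \a cached_extent
 * to avoid repeated lookups over the same range.
 */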
static int osd_is_mapped(struct dt_object *dt, __u64 offset,
                         struct osd_fextent *cached_extent)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        sector_t block = offset >> inode->i_blkbits;
        sector_t start;
        struct fiemap_extent_info fei = { 0 };
        struct fiemap_extent fe = { 0 };
        mm_segment_t saved_fs;
        int rc;

        if (block >= cached_extent->start && block < cached_extent->end)
                return cached_extent->mapped;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
                return 0;

        fei.fi_extents_max = 1;
        fei.fi_extents_start = &fe;

        saved_fs = get_fs();
        set_fs(get_ds());
        rc = inode->i_op->fiemap(inode, &fei, offset,
                                 FIEMAP_MAX_OFFSET - offset);
        set_fs(saved_fs);
        if (rc != 0)
                return 0;

        start = fe.fe_logical >> inode->i_blkbits;

        if (start > block) {
                cached_extent->start = block;
                cached_extent->end = start;
                cached_extent->mapped = 0;
        } else {
                cached_extent->start = start;
                cached_extent->end = (fe.fe_logical + fe.fe_length) >>
                                      inode->i_blkbits;
                cached_extent->mapped = 1;
        }

        return cached_extent->mapped;
}

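/**
 * Declare the transaction credits and quota needed for a bulk write.
 *
 * Estimates the number of extents in the request, sizes the journal
 * credits for data blocks plus tree/bitmap/group-descriptor updates,
 * and reserves quota for the unmapped pages and metadata blocks.
 */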
static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        int                     extents = 1;
        int                     depth;
        int                     i;
        int                     newblocks;
        int                     rc = 0;
        int                     flags = 0;
        int                     credits = 0;
        long long               quota_space = 0;
        struct osd_fextent      extent = { 0 };
        enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        newblocks = npages;

        /* calculate number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                if (i && lnb[i].lnb_file_offset !=
                    lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
                        extents++;

                if (!osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
                        quota_space += PAGE_SIZE;

                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the checking
                 * for whether page is from cache can be simplified as:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        declare_flags |= OSD_QID_FORCE;
        }

        /*
         * each extent can go into a new leaf causing a split
         * 5 is max tree depth: inode + 4 index blocks
         * with blockmaps, depth is 3 at most
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * many concurrent threads may grow the tree by the time
                 * our transaction starts, so consider 2 the minimum depth
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * extents;
        }

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);

        /* each new block can go in a different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                credits += newblocks;

        /* we can't dirty more gd blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                credits += newblocks;

        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* make sure the over quota flags were not set */
        lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), quota_space, oh,
                                   osd_dt_obj(dt), &flags, declare_flags);

        /* we only need to store the overquota flags in the first lnb for
         * now; once we support multi-object BRW, this code needs to be
         * revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
        if (flags & QUOTA_FL_OVER_PRJQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;

        RETURN(rc);
}

/*
 * Commit a bulk write: map the pages to disk blocks (allocating new
 * blocks as needed), update i_size, and submit the IO.
 */
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *thandle)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
        loff_t isize;
        int rc = 0, i;
        struct osd_fextent extent = { 0 };

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 1, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        ll_vfs_dq_init(inode);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_rc == -ENOSPC &&
                    osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent)) {
                        /* Allow the write to proceed if overwriting an
                         * existing block */
                        lnb[i].lnb_rc = 0;
                }

                if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
                               lnb[i].lnb_rc);
                        LASSERT(lnb[i].lnb_page);
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                        continue;
                }

                LASSERT(PageLocked(lnb[i].lnb_page));
                LASSERT(!PageWriteback(lnb[i].lnb_page));

                if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
                        isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;

                /*
                 * Since write and truncate are serialized by oo_sem, even
                 * partial-page truncate should not leave dirty pages in the
                 * page cache.
                 */
                LASSERT(!PageDirty(lnb[i].lnb_page));

                SetPageUptodate(lnb[i].lnb_page);

                osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
        }

        osd_trans_exec_op(env, thandle, OSD_OT_WRITE);

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 1);
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
        }

        if (likely(rc == 0)) {
                spin_lock(&inode->i_lock);
                if (isize > i_size_read(inode)) {
                        i_size_write(inode, isize);
                        LDISKFS_I(inode)->i_disksize = isize;
                        spin_unlock(&inode->i_lock);
                        ll_dirty_inode(inode, I_DIRTY_DATASYNC);
                } else {
                        spin_unlock(&inode->i_lock);
                }

                rc = osd_do_bio(osd, inode, iobuf);
                /* we don't do stats here as in the read path because
                 * write is async: we'll do this in osd_bufs_put() */
        } else {
                osd_fini_iobuf(osd, iobuf);
        }

        osd_trans_exec_check(env, thandle, OSD_OT_WRITE);

        if (unlikely(rc != 0)) {
                /* if write fails, we should drop pages from the cache */
                for (i = 0; i < npages; i++) {
                        if (lnb[i].lnb_page == NULL)
                                continue;
                        LASSERT(PageLocked(lnb[i].lnb_page));
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                }
        }

        RETURN(rc);
}

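/**
 * Prepare a read: compute the number of valid bytes per page from the
 * current file size, queue the pages that are not already uptodate for
 * disk IO, and account page cache hits and misses.
 */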
1354 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1355                          struct niobuf_local *lnb, int npages)
1356 {
1357         struct osd_thread_info *oti = osd_oti_get(env);
1358         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1359         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1360         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1361         int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
1362         ktime_t start, end;
1363         s64 timediff;
1364         loff_t isize;
1365
1366         LASSERT(inode);
1367
1368         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1369         if (unlikely(rc != 0))
1370                 RETURN(rc);
1371
1372         isize = i_size_read(inode);
1373
1374         if (osd->od_read_cache)
1375                 cache = 1;
1376         if (isize > osd->od_readcache_max_filesize)
1377                 cache = 0;
1378
1379         start = ktime_get();
1380         for (i = 0; i < npages; i++) {
1381
1382                 if (isize <= lnb[i].lnb_file_offset)
1383                         /* If there's no more data, abort early.
1384                          * lnb->lnb_rc == 0, so it's easy to detect later. */
1385                         break;
1386
1387                 if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len)
1388                         lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
1389                 else
1390                         lnb[i].lnb_rc = lnb[i].lnb_len;
1391
1392                 /* Bypass disk read if fail_loc is set properly */
1393                 if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
1394                         SetPageUptodate(lnb[i].lnb_page);
1395
1396                 if (PageUptodate(lnb[i].lnb_page)) {
1397                         cache_hits++;
1398                 } else {
1399                         cache_misses++;
1400                         osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
1401                 }
1402
1403                 if (cache == 0)
1404                         generic_error_remove_page(inode->i_mapping,
1405                                                   lnb[i].lnb_page);
1406         }
1407         end = ktime_get();
1408         timediff = ktime_us_delta(end, start);
1409         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1410
1411         if (cache_hits != 0)
1412                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1413                                     cache_hits);
1414         if (cache_misses != 0)
1415                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1416                                     cache_misses);
1417         if (cache_hits + cache_misses != 0)
1418                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1419                                     cache_hits + cache_misses);
1420
1421         if (iobuf->dr_npages) {
1422                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1423                                                  iobuf->dr_npages,
1424                                                  iobuf->dr_blocks, 0);
1425                 if (likely(rc == 0))
1426                         rc = osd_do_bio(osd, inode, iobuf);
1427                 /* IO stats will be done in osd_bufs_put() */
1428         }
1429
1430         RETURN(rc);
1431 }
1432
1433 /*
1434  * XXX: Another layering violation for now.
1435  *
1436  * We don't want to use the ->f_op->read/write methods, because generic file write
1437  *
1438  *         - serializes on ->i_sem, and
1439  *
1440  *         - does a lot of extra work like balance_dirty_pages(),
1441  *
1442  * which doesn't work for globally shared files like /last_rcvd.
1443  */
1444 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1445 {
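             /* fast symlink: the target is stored inline in the inode body
              * (ei->i_data below) and the caller guarantees buflen fits it */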
1446         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1447
1448         memcpy(buffer, (char *)ei->i_data, buflen);
1449
1450         return buflen;
1451 }
1452
1453 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1454 {
1455         struct buffer_head *bh;
1456         unsigned long block;
1457         int osize;
1458         int blocksize;
1459         int csize;
1460         int boffs;
1461
1462         /* prevent reading after eof */
1463         spin_lock(&inode->i_lock);
1464         if (i_size_read(inode) < *offs + size) {
1465                 loff_t diff = i_size_read(inode) - *offs;
1466                 spin_unlock(&inode->i_lock);
1467                 if (diff < 0) {
1468                         CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
1469                                i_size_read(inode), *offs);
1470                         return -EBADR;
1471                 } else if (diff == 0) {
1472                         return 0;
1473                 } else {
1474                         size = diff;
1475                 }
1476         } else {
1477                 spin_unlock(&inode->i_lock);
1478         }
1479
1480         blocksize = 1 << inode->i_blkbits;
1481         osize = size;
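             /*
              * copy out block by block; for example, with 4KB blocks an
              * offset of 5000 maps to block 1 with in-block offset (boffs)
              * 904, so the first chunk copied is min(4096 - 904, size) bytes
              */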
1482         while (size > 0) {
1483                 block = *offs >> inode->i_blkbits;
1484                 boffs = *offs & (blocksize - 1);
1485                 csize = min(blocksize - boffs, size);
1486                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1487                 if (IS_ERR(bh)) {
1488                         CERROR("%s: can't read %u@%llu on ino %lu: "
1489                                "rc = %ld\n", osd_ino2name(inode),
1490                                csize, *offs, inode->i_ino,
1491                                PTR_ERR(bh));
1492                         return PTR_ERR(bh);
1493                 }
1494
1495                 if (bh != NULL) {
1496                         memcpy(buf, bh->b_data + boffs, csize);
1497                         brelse(bh);
1498                 } else {
1499                         memset(buf, 0, csize);
1500                 }
1501
1502                 *offs += csize;
1503                 buf += csize;
1504                 size -= csize;
1505         }
1506         return osize;
1507 }
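     /*
      * Usage sketch (illustrative only; the caller shown is an assumption):
      * reading a fixed-size record from an internal file boils down to
      *
      *      loff_t pos = 0;
      *      int rc = osd_ldiskfs_read(inode, &record, sizeof(record), &pos);
      *
      * on success the number of bytes read is returned and pos is advanced.
      */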
1508
1509 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1510                         struct lu_buf *buf, loff_t *pos)
1511 {
1512         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1513         int           rc;
1514
1515         /* Read small symlink from inode body as we need to maintain correct
1516          * on-disk symlinks for ldiskfs.
1517          */
1518         if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1519             (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1520                 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1521         else
1522                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1523
1524         return rc;
1525 }
1526
1527 static inline int osd_extents_enabled(struct super_block *sb,
1528                                       struct inode *inode)
1529 {
1530         if (inode != NULL) {
1531                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1532                         return 1;
1533         } else if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
1534                                 LDISKFS_FEATURE_INCOMPAT_EXTENTS)) {
1535                 return 1;
1536         }
1537         return 0;
1538 }
1539
1540 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1541                            const loff_t size, const loff_t pos,
1542                            const int blocks)
1543 {
1544         int credits, bits, bs, i;
1545
1546         bits = sb->s_blocksize_bits;
1547         bs = 1 << bits;
1548
1549         /* legacy blockmap: 3 levels * 3 (bitmap, gd, the block itself);
1550          * we do not expect blockmaps on large files,
1551          * so shrink the estimate to 2 levels (enough for 4GB files) */
1552
1553         /* this is the default reservation: 2 levels */
1554         credits = (blocks + 2) * 3;
1555
1556         /* actual offset is unknown, hard to optimize */
1557         if (pos == -1)
1558                 return credits;
1559
1560         /* now check a few specific cases to optimize */
1561         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1562                 /* no indirects */
1563                 credits = blocks;
1564                 /* allocate if not allocated */
1565                 if (inode == NULL) {
1566                         credits += blocks * 2;
1567                         return credits;
1568                 }
1569                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1570                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1571                         if (LDISKFS_I(inode)->i_data[i] == 0)
1572                                 credits += 2;
1573                 }
1574         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1575                 /* single indirect */
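                     /* an indirect block holds bs / 4 pointers,
                      * i.e. 1024 with 4KB blocks, hence the constant */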
1576                 credits = blocks * 3;
1577                 if (inode == NULL ||
1578                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1579                         credits += 3;
1580                 else
1581                         /* The indirect block may be modified. */
1582                         credits += 1;
1583         }
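             /*
              * worked example (4KB blocks, for illustration): a 2-block
              * write into still-unallocated direct blocks of an existing
              * inode yields blocks (2) + 2 per unallocated block = 6 credits
              */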
1584
1585         return credits;
1586 }
1587
1588 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1589                                  const struct lu_buf *buf, loff_t _pos,
1590                                  struct thandle *handle)
1591 {
1592         struct osd_object  *obj  = osd_dt_obj(dt);
1593         struct inode       *inode = obj->oo_inode;
1594         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1595         struct osd_thandle *oh;
1596         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1597         int                 bits, bs;
1598         int                 depth, size;
1599         loff_t              pos;
1600         ENTRY;
1601
1602         LASSERT(buf != NULL);
1603         LASSERT(handle != NULL);
1604
1605         oh = container_of0(handle, struct osd_thandle, ot_super);
1606         LASSERT(oh->ot_handle == NULL);
1607
1608         size = buf->lb_len;
1609         bits = sb->s_blocksize_bits;
1610         bs = 1 << bits;
1611
1612         if (_pos == -1) {
1613                 /* this is an append: the actual offset is unknown,
1614                  * so expect a record that may cross a block boundary */
1615                 pos = 0;
1616         } else {
1617                 pos = _pos;
1618         }
1619
1620         /* blocks to modify */
1621         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1622         LASSERT(blocks > 0);
1623
1624         if (inode != NULL && _pos != -1) {
1625                 /* object size in blocks */
1626                 est = (i_size_read(inode) + bs - 1) >> bits;
1627                 allocated = inode->i_blocks >> (bits - 9);
1628                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1629                         /* looks like an overwrite, no need to modify tree */
1630                         credits = blocks;
1631                         /* no need to modify i_size */
1632                         goto out;
1633                 }
1634         }
1635
1636         if (osd_extents_enabled(sb, inode)) {
1637                 /*
1638                  * many concurrent threads may grow the tree by the time
1639                  * our transaction starts, so consider 2 a minimum depth.
1640                  * for every level we may need to allocate a new block
1641                  * and take some entries from the old one: 3 blocks to
1642                  * allocate (bitmap, gd, the block itself) + the old
1643                  * block = 4 per level.
1644                  */
1645                 depth = inode != NULL ? ext_depth(inode) : 0;
1646                 depth = max(depth, 1) + 1;
1647                 credits = depth;
1648                 /* if not an append, then a split may need to modify
1649                  * existing blocks, moving entries into the new ones */
1650                 if (_pos != -1)
1651                         credits += depth;
1652                 /* blocks to store data: bitmap,gd,itself */
1653                 credits += blocks * 3;
1654         } else {
1655                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1656         }
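             /*
              * e.g. (illustrative): a one-block overwrite that still has to
              * grow an extent tree of depth 1 declares depth (2) + depth (2)
              * + blocks * 3 (3) = 7 credits at this point
              */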
1657         /* one credit for the inode itself, unless it is created in
1658          * this transaction and thus counted by the creation method */
1659         if (inode != NULL)
1660                 credits++;
1661
1662 out:
1663
1664         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1665
1666         /* dt_declare_write() is usually called for system objects, such
1667          * as llog or last_rcvd files. We needn't enforce quota on those
1668          * objects, so always set the lqi_space as 0. */
1669         if (inode != NULL)
1670                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1671                                            i_gid_read(inode),
1672                                            i_projid_read(inode), 0,
1673                                            oh, obj, NULL, OSD_QID_BLK);
1674         RETURN(rc);
1675 }
1676
1677 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1678 {
1679         /* LU-2634: clear the extent format for fast symlink */
1680         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1681
1682         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1683         spin_lock(&inode->i_lock);
1684         LDISKFS_I(inode)->i_disksize = buflen;
1685         i_size_write(inode, buflen);
1686         spin_unlock(&inode->i_lock);
1687         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1688
1689         return 0;
1690 }
1691
1692 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
1693                              int write_NUL, loff_t *offs, handle_t *handle)
1694 {
1695         struct buffer_head *bh        = NULL;
1696         loff_t              offset    = *offs;
1697         loff_t              new_size  = i_size_read(inode);
1698         unsigned long       block;
1699         int                 blocksize = 1 << inode->i_blkbits;
1700         int                 err = 0;
1701         int                 size;
1702         int                 boffs;
1703         int                 dirty_inode = 0;
1704
1705         if (write_NUL) {
1706                 /*
1707                  * a long symlink write does not count the NUL terminator
1708                  * in bufsize; we write it out here, and the inode's file
1709                  * size does not count the NUL terminator either.
1710                  */
1711                 ((char *)buf)[bufsize] = '\0';
1712                 ++bufsize;
1713         }
1714
1715         while (bufsize > 0) {
1716                 int credits = handle->h_buffer_credits;
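                     /* snapshot the remaining journal credits so the error
                      * message below can report before/after values */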
1717
1718                 if (bh)
1719                         brelse(bh);
1720
1721                 block = offset >> inode->i_blkbits;
1722                 boffs = offset & (blocksize - 1);
1723                 size = min(blocksize - boffs, bufsize);
1724                 bh = __ldiskfs_bread(handle, inode, block, 1);
1725                 if (IS_ERR_OR_NULL(bh)) {
1726                         if (bh == NULL) {
1727                                 err = -EIO;
1728                         } else {
1729                                 err = PTR_ERR(bh);
1730                                 bh = NULL;
1731                         }
1732
1733                         CERROR("%s: error reading offset %llu (block %lu, "
1734                                "size %d, offs %llu), credits %d/%d: rc = %d\n",
1735                                inode->i_sb->s_id, offset, block, bufsize, *offs,
1736                                credits, handle->h_buffer_credits, err);
1737                         break;
1738                 }
1739
1740                 err = ldiskfs_journal_get_write_access(handle, bh);
1741                 if (err) {
1742                         CERROR("journal_get_write_access() returned error %d\n",
1743                                err);
1744                         break;
1745                 }
1746                 LASSERTF(boffs + size <= bh->b_size,
1747                          "boffs %d size %d bh->b_size %lu\n",
1748                          boffs, size, (unsigned long)bh->b_size);
1749                 memcpy(bh->b_data + boffs, buf, size);
1750                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
1751                 if (err)
1752                         break;
1753
1754                 if (offset + size > new_size)
1755                         new_size = offset + size;
1756                 offset += size;
1757                 bufsize -= size;
1758                 buf += size;
1759         }
1760         if (bh)
1761                 brelse(bh);
1762
1763         if (write_NUL)
1764                 --new_size;
1765         /* correct in-core and on-disk sizes */
1766         if (new_size > i_size_read(inode)) {
1767                 spin_lock(&inode->i_lock);
1768                 if (new_size > i_size_read(inode))
1769                         i_size_write(inode, new_size);
1770                 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
1771                         LDISKFS_I(inode)->i_disksize = i_size_read(inode);
1772                         dirty_inode = 1;
1773                 }
1774                 spin_unlock(&inode->i_lock);
1775                 if (dirty_inode)
1776                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1777         }
1778
1779         if (err == 0)
1780                 *offs = offset;
1781         return err;
1782 }
1783
1784 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1785                          const struct lu_buf *buf, loff_t *pos,
1786                          struct thandle *handle, int ignore_quota)
1787 {
1788         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1789         struct osd_thandle      *oh;
1790         ssize_t                 result;
1791         int                     is_link;
1792
1793         LASSERT(dt_object_exists(dt));
1794
1795         LASSERT(handle != NULL);
1796         LASSERT(inode != NULL);
1797         ll_vfs_dq_init(inode);
1798
1799         /* XXX: don't check: one declared chunk can be used many times */
1800         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
1801
1802         oh = container_of(handle, struct osd_thandle, ot_super);
1803         LASSERT(oh->ot_handle->h_transaction != NULL);
1804         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
1805
1806         /* Write small symlink to inode body as we need to maintain correct
1807          * on-disk symlinks for ldiskfs.
1808          * Note: buf->lb_buf contains a NUL terminator, while buf->lb_len
1809          * does not include it.
1810          */
1811         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1812         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1813                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1814         else
1815                 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
1816                                                   buf->lb_len, is_link, pos,
1817                                                   oh->ot_handle);
1818         if (result == 0)
1819                 result = buf->lb_len;
1820
1821         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
1822
1823         return result;
1824 }
1825
1826 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1827                              __u64 start, __u64 end, struct thandle *th)
1828 {
1829         struct osd_thandle *oh;
1830         struct inode       *inode;
1831         int                 rc;
1832         ENTRY;
1833
1834         LASSERT(th);
1835         oh = container_of(th, struct osd_thandle, ot_super);
1836
1837         /*
1838          * we don't need to reserve credits for the whole truncate:
1839          * that's not possible, as truncate may need to free too many
1840          * blocks for a single transaction. instead we reserve
1841          * credits to change i_size and put the inode onto the
1842          * orphan list. if needed, truncate will extend or restart
1843          * the transaction
1844          */
1845         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1846                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1847
1848         inode = osd_dt_obj(dt)->oo_inode;
1849         LASSERT(inode);
1850
1851         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1852                                    i_projid_read(inode), 0, oh, osd_dt_obj(dt),
1853                                    NULL, OSD_QID_BLK);
1854         RETURN(rc);
1855 }
1856
1857 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1858                      __u64 start, __u64 end, struct thandle *th)
1859 {
1860         struct osd_thandle *oh;
1861         struct osd_object  *obj = osd_dt_obj(dt);
1862         struct inode       *inode = obj->oo_inode;
1863         handle_t           *h;
1864         tid_t               tid;
1865         int                rc = 0, rc2 = 0;
1866         ENTRY;
1867
1868         LASSERT(end == OBD_OBJECT_EOF);
1869         LASSERT(dt_object_exists(dt));
1870         LASSERT(osd_invariant(obj));
1871         LASSERT(inode != NULL);
1872         ll_vfs_dq_init(inode);
1873
1874         LASSERT(th);
1875         oh = container_of(th, struct osd_thandle, ot_super);
1876         LASSERT(oh->ot_handle->h_transaction != NULL);
1877
1878         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
1879
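             /* remember the transaction id: ldiskfs_truncate() below may
              * restart the handle, which is detected and handled after
              * the truncate */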
1880         tid = oh->ot_handle->h_transaction->t_tid;
1881
1882         spin_lock(&inode->i_lock);
1883         i_size_write(inode, start);
1884         spin_unlock(&inode->i_lock);
1885         ll_truncate_pagecache(inode, start);
1886 #ifdef HAVE_INODEOPS_TRUNCATE
1887         if (inode->i_op->truncate) {
1888                 inode->i_op->truncate(inode);
1889         } else
1890 #endif
1891                 ldiskfs_truncate(inode);
1892
1893         /*
1894          * For a partial-page truncate, flush the page to disk immediately to
1895          * avoid data corruption during direct disk write.  b=17397
1896          */
1897         if ((start & ~PAGE_MASK) != 0)
1898                 rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
1899
1900         h = journal_current_handle();
1901         LASSERT(h != NULL);
1902         LASSERT(h == oh->ot_handle);
1903
1904         /* do not check credits with osd_trans_exec_check() here: the
1905          * truncate can restart the transaction internally, in which
1906          * case we restart the handle below with the declared credits */
1907
1908         if (tid != h->h_transaction->t_tid) {
1909                 int credits = oh->ot_credits;
1910                 /*
1911                  * the transaction has changed during truncate;
1912                  * we need to restart the handle with our credits
1913                  */
1914                 if (h->h_buffer_credits < credits) {
1915                         if (ldiskfs_journal_extend(h, credits))
1916                                 rc2 = ldiskfs_journal_restart(h, credits);
1917                 }
1918         }
1919
1920         RETURN(rc == 0 ? rc2 : rc);
1921 }
1922
1923 static int fiemap_check_ranges(struct inode *inode,
1924                                u64 start, u64 len, u64 *new_len)
1925 {
1926         loff_t maxbytes;
1927
1928         *new_len = len;
1929
1930         if (len == 0)
1931                 return -EINVAL;
1932
1933         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
1934                 maxbytes = inode->i_sb->s_maxbytes;
1935         else
1936                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
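             /* extent-mapped files can address the full s_maxbytes range,
              * while block-mapped files are capped by the indirect tree */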
1937
1938         if (start > maxbytes)
1939                 return -EFBIG;
1940
1941         /*
1942          * Shrink request scope to what the fs can actually handle.
1943          */
1944         if (len > maxbytes || (maxbytes - len) < start)
1945                 *new_len = maxbytes - start;
1946
1947         return 0;
1948 }
1949
1950 /* So that the fiemap access checks can't overflow on 32 bit machines. */
1951 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
1952
1953 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
1954                           struct fiemap *fm)
1955 {
1956         struct fiemap_extent_info fieinfo = {0, };
1957         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1958         u64 len;
1959         int rc;
1960
1962         LASSERT(inode);
1963         if (inode->i_op->fiemap == NULL)
1964                 return -EOPNOTSUPP;
1965
1966         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
1967                 return -EINVAL;
1968
1969         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
1970         if (rc)
1971                 return rc;
1972
1973         fieinfo.fi_flags = fm->fm_flags;
1974         fieinfo.fi_extents_max = fm->fm_extent_count;
1975         fieinfo.fi_extents_start = fm->fm_extents;
1976
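             /* FIEMAP_FLAG_SYNC asks for dirty data to be flushed first so
              * that the extents returned reflect what is on disk */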
1977         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
1978                 filemap_write_and_wait(inode->i_mapping);
1979
1980         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
1981         fm->fm_flags = fieinfo.fi_flags;
1982         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
1983
1984         return rc;
1985 }
1986
1987 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
1988                        __u64 start, __u64 end, enum lu_ladvise_type advice)
1989 {
1990         int              rc = 0;
1991         struct inode    *inode = osd_dt_obj(dt)->oo_inode;
1992         ENTRY;
1993
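             /*
              * LU_LADVISE_DONTNEED drops clean pages from the server-side
              * page cache; assuming current client support, it can be
              * driven with "lfs ladvise -a dontneed" from a Lustre client.
              */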
1994         switch (advice) {
1995         case LU_LADVISE_DONTNEED:
1996                 if (end == 0)
1997                         break;
1998                 invalidate_mapping_pages(inode->i_mapping,
1999                                          start >> PAGE_CACHE_SHIFT,
2000                                          (end - 1) >> PAGE_CACHE_SHIFT);
2001                 break;
2002         default:
2003                 rc = -ENOTSUPP;
2004                 break;
2005         }
2006
2007         RETURN(rc);
2008 }
2009
2010 /*
2011  * in some cases we may need the declare methods for objects still
2012  * being created, e.g. when we create a symlink
2013  */
2014 const struct dt_body_operations osd_body_ops_new = {
2015         .dbo_declare_write = osd_declare_write,
2016 };
2017
2018 const struct dt_body_operations osd_body_ops = {
2019         .dbo_read                       = osd_read,
2020         .dbo_declare_write              = osd_declare_write,
2021         .dbo_write                      = osd_write,
2022         .dbo_bufs_get                   = osd_bufs_get,
2023         .dbo_bufs_put                   = osd_bufs_put,
2024         .dbo_write_prep                 = osd_write_prep,
2025         .dbo_declare_write_commit       = osd_declare_write_commit,
2026         .dbo_write_commit               = osd_write_commit,
2027         .dbo_read_prep                  = osd_read_prep,
2028         .dbo_declare_punch              = osd_declare_punch,
2029         .dbo_punch                      = osd_punch,
2030         .dbo_fiemap_get                 = osd_fiemap_get,
2031         .dbo_ladvise                    = osd_ladvise,
2032 };