LU-9906 osd: use pagevec for putting pages
lustre/osd-ldiskfs/osd_io.c (fs/lustre-release.git)
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

/* LUSTRE_VERSION_CODE */
#include <lustre_ver.h>
/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 * OBD_FAIL_CHECK
 */
#include <obd_support.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>

static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = 0;
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
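
/*
 * Illustrative sketch (standalone userspace C, not part of this file):
 * the reallocation above rounds the buffer size up by doubling from 256
 * pages (1MB of 4KB pages), so repeated small growths do not cause
 * repeated reallocations. MODEL_MAX_BRW_PAGES stands in for
 * PTLRPC_MAX_BRW_PAGES and its value is an assumption.
 */
#include <stdio.h>

#define MODEL_MAX_BRW_PAGES 1024 /* assumed value for illustration */

static int model_round_up_pages(int pages)
{
        int i = 256; /* start with 1MB for 4K pages */

        while (i <= MODEL_MAX_BRW_PAGES && i < pages)
                i <<= 1;
        return i;
}

int main(void)
{
        printf("300 -> %d\n", model_round_up_pages(300));   /* 512 */
        printf("1000 -> %d\n", model_round_up_pages(1000)); /* 1024 */
        return 0;
}
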
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)

static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;
}

void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.
                                 hist[BRW_R_DIO_FRAGS+rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
                                      iobuf->dr_elapsed);
        }
}

#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
        int error = bio->bi_error;
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
        struct osd_iobuf *iobuf = bio->bi_private;
        int iter;
        struct bio_vec *bvl;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to <https://jira.whamcloud.com/> , along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps).  Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, "
#ifdef HAVE_BI_RW
                       "bi_rw: %lu,"
#else
                       "bi_opf: %u,"
#endif
                       "bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p,"
                       "bi_cnt: %d, bi_private: %p\n", bio->bi_next,
                        (unsigned long)bio->bi_flags,
#ifdef HAVE_BI_RW
                        bio->bi_rw,
#else
                        bio->bi_opf,
#endif
                        bio->bi_vcnt, bio_idx(bio),
                        bio_sectors(bio) << 9, bio->bi_end_io,
#ifdef HAVE_BI_CNT
                        atomic_read(&bio->bi_cnt),
#else
                        atomic_read(&bio->__bi_cnt),
#endif
                        bio->bi_private);
                return;
        }

        /* the check is outside of the cycle for performance reasons -bzzz */
        if (!bio_data_dir(bio)) {
                bio_for_each_segment_all(bvl, bio, iter) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl_to_page(bvl));
                        LASSERT(PageLocked(bvl_to_page(bvl)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * set dr_elapsed before dr_numreqs turns to 0, otherwise
         * it's possible that service thread will see dr_numreqs
         * is zero, but dr_elapsed is not set yet, leading to lost
         * data in this processing and an assertion in a subsequent
         * call to OSD.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
}
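
/*
 * Illustrative sketch (standalone C11, not part of this file): models the
 * ordering rule in dio_complete_routine() above. dr_elapsed_valid must be
 * published before the final decrement of dr_numreqs, so a waiter that
 * observes numreqs == 0 also observes elapsed_valid == 1. All names here
 * are hypothetical stand-ins for the osd_iobuf fields.
 */
#include <stdatomic.h>
#include <stdio.h>

struct model_iobuf {
        atomic_int numreqs;
        int elapsed_valid;
};

static void model_complete(struct model_iobuf *b)
{
        /* publish the timing data before the counter can reach zero */
        if (atomic_load(&b->numreqs) == 1)
                b->elapsed_valid = 1;
        if (atomic_fetch_sub(&b->numreqs, 1) == 1)
                printf("last completion: wake up waiter\n");
}

int main(void)
{
        struct model_iobuf b = { .numreqs = ATOMIC_VAR_INIT(2),
                                 .elapsed_valid = 0 };

        model_complete(&b);
        model_complete(&b);
        printf("numreqs=%d elapsed_valid=%d\n",
               atomic_load(&b.numreqs), b.elapsed_valid);
        return 0;
}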

static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device    *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
#else
        bio->bi_opf |= rw;
        submit_bio(bio);
#endif
}

static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}

static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int            blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page  **pages = iobuf->dr_pages;
        int            npages = iobuf->dr_npages;
        sector_t      *blocks = iobuf->dr_blocks;
        int            total_blocks = npages * blocks_per_page;
        int            sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int   blocksize = inode->i_sb->s_blocksize;
        struct bio    *bio = NULL;
        struct page   *page;
        unsigned int   page_offset;
        sector_t       sector;
        int            nblocks;
        int            block_idx;
        int            page_idx;
        int            i;
        int            rc = 0;
        DECLARE_PLUG(plug);
        ENTRY;

        LASSERT(iobuf->dr_npages == npages);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = cfs_time_current();

        blk_start_plug(&plug);
        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q =
                                        bdev_get_queue(bio->bi_bdev);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       0, queue_max_hw_segments(q));
                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio_set_sector(bio, sector);
#ifdef HAVE_BI_RW
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
#else
                        bio->bi_opf = (iobuf->dr_rw == 0) ? READ : WRITE;
#endif
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

out:
        blk_finish_plug(&plug);

        /* To achieve better IO throughput, we don't wait for write
         * completion here; instead we proceed with the transaction commit
         * in parallel and wait for IO completion once the transaction is
         * stopped. See osd_trans_stop() for more details. -bzzz */
        if (iobuf->dr_rw == 0) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}
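
/*
 * Illustrative sketch (standalone C, not part of this file): the inner
 * loop of osd_do_bio() grows nblocks while the next logical block maps to
 * the physically consecutive block, so one bio segment can cover a whole
 * run. The sector comparison above is this same test scaled by
 * sector_bits.
 */
#include <stdio.h>

static int model_count_contig(const unsigned long long *blocks, int nr, int i)
{
        int n = 1;

        /* additional contiguous file blocks? */
        while (i + n < nr && blocks[i + n] == blocks[i] + n)
                n++;
        return n;
}

int main(void)
{
        unsigned long long blocks[] = { 100, 101, 102, 200 };

        printf("run at 0: %d blocks\n", model_count_contig(blocks, 4, 0));
        printf("run at 3: %d blocks\n", model_count_contig(blocks, 4, 3));
        return 0;
}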

static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb)
{
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_SIZE - 1);
                int plen = PAGE_SIZE - poff;

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(0);
}
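
/*
 * Illustrative sketch (standalone C, not part of this file): the
 * splitting rule of osd_map_remote_to_local() above. A byte range is cut
 * at page boundaries; only the first and last fragments can be partial
 * pages. MODEL_PAGE_SIZE is an assumed page size.
 */
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096 /* assumed page size */

static void model_split_range(long long offset, long long len)
{
        while (len > 0) {
                int poff = (int)(offset & (MODEL_PAGE_SIZE - 1));
                int plen = MODEL_PAGE_SIZE - poff;

                if (plen > len)
                        plen = (int)len;
                printf("file_offset=%lld page_offset=%d len=%d\n",
                       offset, poff, plen);
                offset += plen;
                len -= plen;
        }
}

int main(void)
{
        model_split_range(4000, 5000); /* fragments of 96, 4096, 808 bytes */
        return 0;
}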

static struct page *osd_get_page(struct dt_object *dt, loff_t offset,
                                 gfp_t gfp_mask)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page *page;

        LASSERT(inode);

        page = find_or_create_page(inode->i_mapping, offset >> PAGE_SHIFT,
                                   gfp_mask);

        if (unlikely(page == NULL))
                lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);

        return page;
}

/*
 * The following "locks" are involved:
 * journal_start
 * i_mutex
 * page lock
 *
 * osd write path:
 *  - lock page(s)
 *  - journal_start
 *  - truncate_sem
 *
 * ext4 vmtruncate:
 *  - lock pages, unlock
 *  - journal_start
 *  - lock partial page
 *  - i_data_sem
 *
 */

/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param lnb           array of pages undergoing IO
 * \param npages        number of pages in \a lnb
 *
 * \retval 0            always
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct pagevec pvec;
        int i;

        pagevec_init(&pvec, 0);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_page == NULL)
                        continue;
                LASSERT(PageLocked(lnb[i].lnb_page));
                unlock_page(lnb[i].lnb_page);
                if (pagevec_add(&pvec, lnb[i].lnb_page) == 0)
                        pagevec_release(&pvec);
                dt_object_put(env, dt);
                lnb[i].lnb_page = NULL;
        }

        /* Release any partial pagevec */
        pagevec_release(&pvec);

        RETURN(0);
}
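
/*
 * Illustrative sketch (standalone C, not part of this file): the LU-9906
 * pattern used in osd_bufs_put() above. Pages are queued in a fixed-size
 * vector; pagevec_add() returns the slots remaining, and 0 means "full,
 * release the batch now". Batching amortizes the per-page release cost.
 * MODEL_PAGEVEC_SIZE is an assumed stand-in for the kernel's
 * PAGEVEC_SIZE.
 */
#include <stdio.h>

#define MODEL_PAGEVEC_SIZE 14 /* assumption */

struct model_pagevec {
        int nr;
        void *pages[MODEL_PAGEVEC_SIZE];
};

static int model_pagevec_add(struct model_pagevec *pvec, void *page)
{
        pvec->pages[pvec->nr++] = page;
        return MODEL_PAGEVEC_SIZE - pvec->nr; /* 0 == full */
}

static void model_pagevec_release(struct model_pagevec *pvec)
{
        if (pvec->nr)
                printf("releasing %d pages in one batch\n", pvec->nr);
        pvec->nr = 0; /* a real pagevec would put_page() each entry */
}

int main(void)
{
        struct model_pagevec pvec = { 0 };
        int dummy[40], i;

        for (i = 0; i < 40; i++)
                if (model_pagevec_add(&pvec, &dummy[i]) == 0)
                        model_pagevec_release(&pvec);
        model_pagevec_release(&pvec); /* flush the final partial batch */
        return 0;
}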

/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for the same
 * region, but a partial-page truncate can leave dirty pages in the cache
 * for ldiskfs. It's possible that writeout on such a page is in progress
 * when we access it. It's also possible that during this writeout we put
 * new (partial) data into the page, but then won't be able to proceed in
 * filter_commitrw_write(). Therefore, just wait for writeout completion,
 * as it should be rare enough.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param pos           byte offset of IO start
 * \param len           number of bytes of IO
 * \param lnb           array of extents undergoing IO
 * \param rw            read or write operation, and other flags
 *
 * \retval pages        (zero or more) loaded successfully
 * \retval -ENOMEM      on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        enum dt_bufs_type rw)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int npages, i, rc = 0;
        gfp_t gfp_mask;

        LASSERT(obj->oo_inode);

        osd_map_remote_to_local(pos, len, &npages, lnb);

        /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
        gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
                                             GFP_HIGHUSER;
        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(dt, lnb->lnb_file_offset,
                                             gfp_mask);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                wait_on_page_writeback(lnb->lnb_page);
                BUG_ON(PageWriteback(lnb->lnb_page));

                lu_object_get(&dt->do_lu);
        }

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}

#ifndef HAVE_LDISKFS_MAP_BLOCKS

#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif

struct bpointers {
        sector_t *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

static long ldiskfs_ext_find_goal(struct inode *inode,
                                  struct ldiskfs_ext_path *path,
                                  unsigned long block, int *aflags)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ldiskfs_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ldiskfs_ext_pblock(ex) +
                                (block - le32_to_cpu(ex->ee_block));

                /* it looks like the index is empty;
                 * try to find a starting point from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}

static unsigned long new_blocks(handle_t *handle, struct inode *inode,
                                struct ldiskfs_ext_path *path,
                                unsigned long block, unsigned long *count,
                                int *err)
{
        struct ldiskfs_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = LDISKFS_MB_HINT_DATA;
        pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}

static int ldiskfs_ext_new_extent_cb(struct inode *inode,
                                     struct ldiskfs_ext_path *path,
                                     struct ldiskfs_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                     struct ldiskfs_extent *ex,
#endif
                                     void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct ldiskfs_extent nex;
        unsigned long pblock = 0;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
        if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
#endif
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = LDISKFS_I(inode)->i_ext_generation;
        count = ldiskfs_ext_calc_credits_for_insert(inode, path);

        handle = osd_journal_start(inode, LDISKFS_HT_MISC,
                                   count + LDISKFS_ALLOC_NEEDED + 1);
        if (IS_ERR(handle)) {
                return PTR_ERR(handle);
        }

        if (tgen != LDISKFS_I(inode)->i_ext_generation) {
                /* the tree has changed, so the path may be invalid now */
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback is not
         * protected by i_data_sem as a whole, so we patch it to store the
         * generation in the path, and verify here that the tree hasn't
         * changed */
        down_write((&LDISKFS_I(inode)->i_data_sem));

        /* validate the extent; make sure the extent tree has not changed */
        if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&LDISKFS_I(inode)->i_data_sem);
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        BUG_ON(count > cex->ec_len);

        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ldiskfs_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
        if (err) {
                /* free data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it every free() */
                ldiskfs_discard_preallocations(inode);
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ldiskfs_free_blocks(handle, inode, NULL,
                                    ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#else
                ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * By storing the length of the extent we actually inserted,
         * we ask ldiskfs_ext_walk_space() to continue
         * scanning after that block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ldiskfs_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&LDISKFS_I(inode)->i_data_sem));
        ldiskfs_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        CERROR("current extent: %u/%u/%llu %d\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start,
                                cex->ec_type);
#else
                        CERROR("current extent: %u/%u/%llu\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start);
#endif
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (pblock != 0) {
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                unmap_underlying_metadata(inode->i_sb->s_bdev,
                                                          *(bp->blocks));
#else
                                clean_bdev_aliases(inode->i_sb->s_bdev,
                                                   *(bp->blocks), 1);
#endif
                        }
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index,
                                   int clen, sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct bpointers bp;
        int err;

        if (index + clen >= inode->i_sb->s_maxbytes >> PAGE_SHIFT)
                return -EFBIG;

        bp.blocks = blocks;
        bp.start = index * blocks_per_page;
        bp.init_num = bp.num = clen * blocks_per_page;
        bp.create = create;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               bp.start, bp.start + bp.num - 1, (unsigned)inode->i_ino);

        err = ldiskfs_ext_walk_space(inode, bp.start, bp.num,
                                     ldiskfs_ext_new_extent_cb, &bp);
        ldiskfs_ext_invalidate_cache(inode);

        return err;
}

static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode,
                                          struct page **page, int pages,
                                          sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        pgoff_t bitmap_max_page_index;
        sector_t *b;
        int rc = 0, i;

        bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
                                PAGE_SHIFT;
        for (i = 0, b = blocks; i < pages; i++, page++) {
                if ((*page)->index + 1 >= bitmap_max_page_index) {
                        rc = -EFBIG;
                        break;
                }
                rc = ldiskfs_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %llu create %d: rc %d\n",
                               inode->i_ino,
                               (unsigned long long)*b, create, rc);
                        break;
                }
                b += blocks_per_page;
        }
        return rc;
}

static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode,
                                           struct page **page,
                                           int pages, sector_t *blocks,
                                           int create)
{
        int rc = 0, i = 0, clen = 0;
        struct page *fp = NULL;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += clen * (PAGE_SIZE >> inode->i_blkbits);
        }

        if (fp)
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);

cleanup:
        return rc;
}
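
/*
 * Illustrative sketch (standalone C, not part of this file): the grouping
 * loop shared by the mapping functions above. Sorted page indices are
 * walked once and cut into maximal contiguous runs, each run becoming a
 * single osd_ldiskfs_map_nblocks() call.
 */
#include <stdio.h>

static void model_group_runs(const unsigned long *idx, int n)
{
        int i = 0;

        while (i < n) {
                unsigned long first = idx[i];
                int clen = 1;

                while (i + clen < n && idx[i + clen] == first + clen)
                        clen++; /* continue the extent */
                printf("extent: pages %lu..%lu (%d pages)\n",
                       first, first + clen - 1, clen);
                i += clen; /* look for the next extent */
        }
}

int main(void)
{
        unsigned long idx[] = { 3, 4, 5, 9, 10, 42 };

        model_group_runs(idx, 6); /* 3..5, 9..10, 42 */
        return 0;
}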

static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int rc;

        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);

        return rc;
}
#else
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;
        handle_t *handle = NULL;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        if (create) {
                create = LDISKFS_GET_BLOCKS_CREATE;
                handle = ldiskfs_journal_current_handle();
                LASSERT(handle != NULL);
                rc = osd_attach_jinode(inode);
                if (rc)
                        return rc;
        }
        /* pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                long blen, total = 0;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
cont_map:
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;
                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                } else {
                                        *(blocks + total) = map.m_pblk + c;
                                        /* unmap any possible underlying
                                         * metadata from the block device
                                         * mapping.  bug 6998. */
                                        if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                            create)
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                                unmap_underlying_metadata(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c);
#else
                                                clean_bdev_aliases(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c, 1);
#endif
                                }
                        }
                        rc = 0;
                }
                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        return rc;
}
#endif /* HAVE_LDISKFS_MAP_BLOCKS */
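
/*
 * Illustrative sketch (standalone C, not part of this file): the cont_map
 * retry above. ldiskfs_map_blocks() may map fewer blocks than requested,
 * so the loop re-issues the request starting at the first unmapped block
 * until the whole extent is covered. The 8-block limit of the fake mapper
 * below is an arbitrary assumption.
 */
#include <stdio.h>

static int model_map(long lblk, long len)
{
        (void)lblk;
        return len > 8 ? 8 : (int)len; /* maps at most 8 blocks per call */
}

int main(void)
{
        long lblk = 100, blen = 20, total = 0;

        while (total < blen) {
                int got = model_map(lblk + total, blen - total);

                printf("mapped %d blocks at lblk %ld\n", got, lblk + total);
                total += got;
        }
        return 0;
}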

static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        ktime_t start;
        ktime_t end;
        s64 timediff;
        ssize_t                 isize;
        __s64                   maxidx;
        int                     rc = 0;
        int                     i;
        int                     cache = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        start = ktime_get();
        for (i = 0; i < npages; i++) {

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);

                /*
                 * Until commit, the content of the page is undefined;
                 * we'll set it uptodate once the bulk transfer is done.
                 * Otherwise subsequent reads could access unstable data.
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_SIZE)
                        continue;

                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}
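
/*
 * Illustrative sketch (standalone C, not part of this file): the
 * partial-page zeroing in osd_write_prep() above. For a page beyond the
 * current file size, the bytes outside [poff, poff + len) are zeroed so
 * stale data is never written to disk around the fragment.
 */
#include <stdio.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096 /* assumed page size */

static void model_zero_edges(char *p, long poff, long len)
{
        long tail = (poff + len) & (MODEL_PAGE_SIZE - 1);

        if (poff)
                memset(p, 0, poff);             /* zero before the data */
        if (tail)
                memset(p + tail, 0, MODEL_PAGE_SIZE - tail); /* and after */
}

int main(void)
{
        static char page[MODEL_PAGE_SIZE];

        memset(page, 'x', sizeof(page));
        model_zero_edges(page, 100, 200);
        printf("head=%d data=%d tail=%d\n",
               page[0], page[150], page[400]); /* 0, 120 ('x'), 0 */
        return 0;
}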

struct osd_fextent {
        sector_t        start;
        sector_t        end;
        unsigned int    mapped:1;
};

static int osd_is_mapped(struct dt_object *dt, __u64 offset,
                         struct osd_fextent *cached_extent)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        sector_t block = offset >> inode->i_blkbits;
        sector_t start;
        struct fiemap_extent_info fei = { 0 };
        struct fiemap_extent fe = { 0 };
        mm_segment_t saved_fs;
        int rc;

        if (block >= cached_extent->start && block < cached_extent->end)
                return cached_extent->mapped;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
                return 0;

        fei.fi_extents_max = 1;
        fei.fi_extents_start = &fe;

        saved_fs = get_fs();
        set_fs(get_ds());
        rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
        set_fs(saved_fs);
        if (rc != 0)
                return 0;

        start = fe.fe_logical >> inode->i_blkbits;

        if (start > block) {
                cached_extent->start = block;
                cached_extent->end = start;
                cached_extent->mapped = 0;
        } else {
                cached_extent->start = start;
                cached_extent->end = (fe.fe_logical + fe.fe_length) >>
                                      inode->i_blkbits;
                cached_extent->mapped = 1;
        }

        return cached_extent->mapped;
}
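
/*
 * Illustrative sketch (standalone C, not part of this file): the
 * one-entry extent cache consulted at the top of osd_is_mapped(). A block
 * inside the cached [start, end) range answers immediately; anything else
 * would fall through to the FIEMAP call.
 */
#include <stdio.h>

struct model_fextent {
        unsigned long long start, end; /* [start, end) in blocks */
        unsigned int mapped:1;
};

static int model_cache_lookup(const struct model_fextent *c,
                              unsigned long long block, int *hit)
{
        *hit = block >= c->start && block < c->end;
        return *hit ? c->mapped : 0; /* miss: caller must do FIEMAP */
}

int main(void)
{
        struct model_fextent c = { .start = 8, .end = 16, .mapped = 1 };
        int hit, mapped;

        mapped = model_cache_lookup(&c, 10, &hit);
        printf("blk 10: hit=%d mapped=%d\n", hit, mapped);
        mapped = model_cache_lookup(&c, 20, &hit);
        printf("blk 20: hit=%d mapped=%d\n", hit, mapped);
        return 0;
}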

static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        int                     extents = 1;
        int                     depth;
        int                     i;
        int                     newblocks;
        int                     rc = 0;
        int                     flags = 0;
        int                     credits = 0;
        long long               quota_space = 0;
        struct osd_fextent      extent = { 0 };
        enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        newblocks = npages;

        /* calculate number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                if (i && lnb[i].lnb_file_offset !=
                    lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
                        extents++;

                if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
                        lnb[i].lnb_flags |= OBD_BRW_MAPPED;
                else
                        quota_space += PAGE_SIZE;

                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the checking
                 * for whether page is from cache can be simplified as:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        declare_flags |= OSD_QID_FORCE;
        }

        /*
         * each extent can go into new leaf causing a split
         * 5 is max tree depth: inode + 4 index blocks
         * with blockmaps, depth is 3 at most
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * many concurrent threads may grow the tree by the time
                 * our transaction starts, so consider 2 a minimum depth
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * extents;
        }

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);

        /* each new block can go in different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                credits += newblocks;

        /* we can't dirty more gd blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                credits += newblocks;

        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* make sure the over quota flags were not set */
        lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), quota_space, oh,
                                   osd_dt_obj(dt), &flags, declare_flags);

        /* we need only to store the overquota flags in the first lnb for
         * now, once we support multiple objects BRW, this code needs be
         * revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
        if (flags & QUOTA_FL_OVER_PRJQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;

        RETURN(rc);
}
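
/*
 * Illustrative worked example (standalone C, not part of this file) of
 * the extent-case credit estimate above, with assumed inputs: 10 pages in
 * 2 extents, ext_depth() == 2, 100 block groups, 4 group-descriptor
 * blocks.
 */
#include <stdio.h>

int main(void)
{
        int npages = 10, extents = 2, ext_depth = 2;
        int groups = 100, gdb = 4;                       /* assumed geometry */
        int depth = (ext_depth > 1 ? ext_depth : 1) + 1; /* 3 */
        int newblocks = npages + depth;                  /* 13 */
        int credits = 1 + depth * 2 * extents;  /* inode + leaf splits: 13 */

        credits += newblocks < groups ? newblocks : groups; /* bitmaps: +13 */
        credits += newblocks < gdb ? newblocks : gdb;       /* gd blocks: +4 */
        printf("estimated credits: %d\n", credits);         /* 30 */
        return 0;
}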

static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *thandle)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
        loff_t isize;
        int rc = 0, i;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 1, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        ll_vfs_dq_init(inode);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_rc == -ENOSPC &&
                    (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
                        /* Allow the write to proceed if overwriting an
                         * existing block */
                        lnb[i].lnb_rc = 0;
                }

                if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
                               lnb[i].lnb_rc);
                        LASSERT(lnb[i].lnb_page);
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                        continue;
                }

                LASSERT(PageLocked(lnb[i].lnb_page));
                LASSERT(!PageWriteback(lnb[i].lnb_page));

                if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
                        isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;

                /*
                 * Since write and truncate are serialized by oo_sem, even
                 * partial-page truncate should not leave dirty pages in the
                 * page cache.
                 */
                LASSERT(!PageDirty(lnb[i].lnb_page));

                SetPageUptodate(lnb[i].lnb_page);

                osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
        }

        osd_trans_exec_op(env, thandle, OSD_OT_WRITE);

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 1);
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
        }

        if (likely(rc == 0)) {
                spin_lock(&inode->i_lock);
                if (isize > i_size_read(inode)) {
                        i_size_write(inode, isize);
                        LDISKFS_I(inode)->i_disksize = isize;
                        spin_unlock(&inode->i_lock);
                        ll_dirty_inode(inode, I_DIRTY_DATASYNC);
                } else {
                        spin_unlock(&inode->i_lock);
                }

                rc = osd_do_bio(osd, inode, iobuf);
                /* we don't do stats here as in the read path because
                 * write is async: we'll do this in osd_bufs_put() */
        } else {
                osd_fini_iobuf(osd, iobuf);
        }

        osd_trans_exec_check(env, thandle, OSD_OT_WRITE);

        if (unlikely(rc != 0)) {
                /* if write fails, we should drop pages from the cache */
                for (i = 0; i < npages; i++) {
                        if (lnb[i].lnb_page == NULL)
                                continue;
                        LASSERT(PageLocked(lnb[i].lnb_page));
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                }
        }

        RETURN(rc);
}
1365
1366 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1367                          struct niobuf_local *lnb, int npages)
1368 {
1369         struct osd_thread_info *oti = osd_oti_get(env);
1370         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1371         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1372         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1373         int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
1374         ktime_t start, end;
1375         s64 timediff;
1376         loff_t isize;
1377
1378         LASSERT(inode);
1379
1380         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1381         if (unlikely(rc != 0))
1382                 RETURN(rc);
1383
1384         isize = i_size_read(inode);
1385
1386         if (osd->od_read_cache)
1387                 cache = 1;
1388         if (isize > osd->od_readcache_max_filesize)
1389                 cache = 0;
1390
1391         start = ktime_get();
1392         for (i = 0; i < npages; i++) {
1393
1394                 if (isize <= lnb[i].lnb_file_offset)
1395                         /* If there's no more data, abort early:
1396                          * lnb[i].lnb_rc stays 0, so the short read is easy to detect later. */
1397                         break;
1398
1399                 if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len)
1400                         lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
1401                 else
1402                         lnb[i].lnb_rc = lnb[i].lnb_len;
1403
1404                 /* Bypass the disk read if OBD_FAIL_OST_FAKE_RW is set */
1405                 if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
1406                         SetPageUptodate(lnb[i].lnb_page);
1407
1408                 if (PageUptodate(lnb[i].lnb_page)) {
1409                         cache_hits++;
1410                 } else {
1411                         cache_misses++;
1412                         osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
1413                 }
1414
1415                 if (cache == 0)
1416                         generic_error_remove_page(inode->i_mapping,
1417                                                   lnb[i].lnb_page);
1418         }
1419         end = ktime_get();
1420         timediff = ktime_us_delta(end, start);
1421         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1422
1423         if (cache_hits != 0)
1424                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1425                                     cache_hits);
1426         if (cache_misses != 0)
1427                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1428                                     cache_misses);
1429         if (cache_hits + cache_misses != 0)
1430                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1431                                     cache_hits + cache_misses);
1432
1433         if (iobuf->dr_npages) {
1434                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1435                                                  iobuf->dr_npages,
1436                                                  iobuf->dr_blocks, 0);
1437                 rc = osd_do_bio(osd, inode, iobuf);
1438
1439                 /* IO stats will be done in osd_bufs_put() */
1440         }
1441
1442         RETURN(rc);
1443 }
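/*
 * Read-cache policy illustrated (values hypothetical): with
 * od_read_cache = 1 and od_readcache_max_filesize = 32MB, the pages of a
 * 1MB file stay in the page cache after the read, while the pages of a
 * 1GB file are dropped via generic_error_remove_page() in the loop
 * above, because "cache" is forced to 0 for files over the limit.
 */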
1444
1445 /*
1446  * XXX: Another layering violation for now.
1447  *
1448  * We don't want to use the generic ->f_op read/write methods, because
1449  * the generic file I/O path
1450  *         - serializes on ->i_sem, and
1451  *
1452  *         - does a lot of extra work like balance_dirty_pages(),
1453  *
1454  * which doesn't work for globally shared files like /last_rcvd.
1455  */
1456 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1457 {
1458         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1459
1460         memcpy(buffer, (char *)ei->i_data, buflen);
1461
1462         return buflen;
1463 }
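/*
 * Note: fast symlinks keep the target string directly in the inode
 * body; ei->i_data is the same 60-byte area that otherwise holds block
 * pointers (or an extent header), so a short target never allocates a
 * data block.  The caller (osd_read() below) only takes this path when
 * buflen fits within i_data.
 */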
1464
1465 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1466 {
1467         struct buffer_head *bh;
1468         unsigned long block;
1469         int osize;
1470         int blocksize;
1471         int csize;
1472         int boffs;
1473
1474         /* prevent reading after eof */
1475         spin_lock(&inode->i_lock);
1476         if (i_size_read(inode) < *offs + size) {
1477                 loff_t diff = i_size_read(inode) - *offs;
1478                 spin_unlock(&inode->i_lock);
1479                 if (diff < 0) {
1480                         CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
1481                                i_size_read(inode), *offs);
1482                         return -EBADR;
1483                 } else if (diff == 0) {
1484                         return 0;
1485                 } else {
1486                         size = diff;
1487                 }
1488         } else {
1489                 spin_unlock(&inode->i_lock);
1490         }
1491
1492         blocksize = 1 << inode->i_blkbits;
1493         osize = size;
1494         while (size > 0) {
1495                 block = *offs >> inode->i_blkbits;
1496                 boffs = *offs & (blocksize - 1);
1497                 csize = min(blocksize - boffs, size);
1498                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1499                 if (IS_ERR(bh)) {
1500                         CERROR("%s: can't read %u@%llu on ino %lu: "
1501                                "rc = %ld\n", osd_ino2name(inode),
1502                                csize, *offs, inode->i_ino,
1503                                PTR_ERR(bh));
1504                         return PTR_ERR(bh);
1505                 }
1506
1507                 if (bh != NULL) {
1508                         memcpy(buf, bh->b_data + boffs, csize);
1509                         brelse(bh);
1510                 } else {
1511                         memset(buf, 0, csize);
1512                 }
1513
1514                 *offs += csize;
1515                 buf += csize;
1516                 size -= csize;
1517         }
1518         return osize;
1519 }
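/*
 * A minimal usage sketch (hypothetical caller and record type; this
 * helper is typically used for internal files such as last_rcvd):
 *
 *      struct my_record rec;
 *      loff_t off = 0;
 *      int rc = osd_ldiskfs_read(inode, &rec, sizeof(rec), &off);
 *
 * On success rc is the number of bytes read (clamped at EOF) and off is
 * advanced by the same amount; holes read back as zeroes via the
 * bh == NULL branch above.
 */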
1520
1521 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1522                         struct lu_buf *buf, loff_t *pos)
1523 {
1524         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1525         int           rc;
1526
1527         /* Read small symlink from inode body as we need to maintain correct
1528          * on-disk symlinks for ldiskfs.
1529          */
1530         if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1531             (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1532                 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1533         else
1534                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1535
1536         return rc;
1537 }
1538
1539 static inline int osd_extents_enabled(struct super_block *sb,
1540                                       struct inode *inode)
1541 {
1542         if (inode != NULL) {
1543                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1544                         return 1;
1545         } else if (ldiskfs_has_feature_extents(sb)) {
1546                 return 1;
1547         }
1548         return 0;
1549 }
1550
1551 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1552                            const loff_t size, const loff_t pos,
1553                            const int blocks)
1554 {
1555         int credits, bits, bs, i;
1556
1557         bits = sb->s_blocksize_bits;
1558         bs = 1 << bits;
1559
1560         /* a legacy block map would need 3 levels * 3 (bitmap, gd, the
1561          * block itself), but we do not expect block-mapped files to be
1562          * large, so shrink the estimate to 2 levels (4GB files) */
1563
1564         /* this is the default reservation: 2 levels */
1565         credits = (blocks + 2) * 3;
1566
1567         /* actual offset is unknown, hard to optimize */
1568         if (pos == -1)
1569                 return credits;
1570
1571         /* now check a few specific cases we can optimize */
1572         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1573                 /* no indirects */
1574                 credits = blocks;
1575                 /* blocks not allocated yet: add bitmap + gd per block */
1576                 if (inode == NULL) {
1577                         credits += blocks * 2;
1578                         return credits;
1579                 }
1580                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1581                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1582                         if (LDISKFS_I(inode)->i_data[i] == 0)
1583                                 credits += 2;
1584                 }
1585         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1586                 /* single indirect */
1587                 credits = blocks * 3;
1588                 if (inode == NULL ||
1589                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1590                         credits += 3;
1591                 else
1592                         /* The indirect block may be modified. */
1593                         credits += 1;
1594         }
1595
1596         return credits;
1597 }
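/*
 * Worked example (assuming bs = 4096, so the LDISKFS_NDIR_BLOCKS = 12
 * direct blocks cover the first 48KB): writing 8KB at pos = 0 for a
 * not-yet-created inode (inode == NULL) takes the "no indirects"
 * branch: credits = blocks (2) + blocks * 2 (4) = 6.  An append
 * (pos == -1) always gets the conservative (blocks + 2) * 3.
 */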
1598
1599 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1600                                  const struct lu_buf *buf, loff_t _pos,
1601                                  struct thandle *handle)
1602 {
1603         struct osd_object  *obj  = osd_dt_obj(dt);
1604         struct inode       *inode = obj->oo_inode;
1605         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1606         struct osd_thandle *oh;
1607         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1608         int                 bits, bs;
1609         int                 depth, size;
1610         loff_t              pos;
1611         ENTRY;
1612
1613         LASSERT(buf != NULL);
1614         LASSERT(handle != NULL);
1615
1616         oh = container_of0(handle, struct osd_thandle, ot_super);
1617         LASSERT(oh->ot_handle == NULL);
1618
1619         size = buf->lb_len;
1620         bits = sb->s_blocksize_bits;
1621         bs = 1 << bits;
1622
1623         if (_pos == -1) {
1624                 /* this is an append: assume the record
1625                  * may cross a block boundary */
1626                 pos = 0;
1627         } else {
1628                 pos = _pos;
1629         }
1630
1631         /* blocks to modify */
1632         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1633         LASSERT(blocks > 0);
1634
1635         if (inode != NULL && _pos != -1) {
1636                 /* object size in blocks */
1637                 est = (i_size_read(inode) + bs - 1) >> bits;
1638                 allocated = inode->i_blocks >> (bits - 9);
1639                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1640                         /* looks like an overwrite, no need to modify tree */
1641                         credits = blocks;
1642                         /* no need to modify i_size */
1643                         goto out;
1644                 }
1645         }
1646
1647         if (osd_extents_enabled(sb, inode)) {
1648                 /*
1649                  * many concurrent threads may grow the tree by the time
1650                  * our transaction starts, so consider 2 a minimum depth.
1651                  * for every level we may need to allocate a new block
1652                  * and move some entries from the old one: 3 blocks to
1653                  * allocate (bitmap, gd, the block itself) plus the old
1654                  * block, i.e. 4 per level.
1655                  */
1656                 depth = inode != NULL ? ext_depth(inode) : 0;
1657                 depth = max(depth, 1) + 1;
1658                 credits = depth;
1659                 /* if not appending, a split may need to modify
1660                  * existing blocks, moving entries into the new ones */
1661                 if (_pos != -1)
1662                         credits += depth;
1663                 /* blocks to store the data: bitmap, gd, the block itself */
1664                 credits += blocks * 3;
1665         } else {
1666                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1667         }
1668         /* one credit for the inode update itself; if the inode is
1669          * created in this transaction, the creation method counts it */
1670         if (inode != NULL)
1671                 credits++;
1672
1673 out:
1674
1675         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1676
1677         /* dt_declare_write() is usually called for system objects, such
1678          * as llog or last_rcvd files. We needn't enforce quota on those
1679          * objects, so always set the lqi_space as 0. */
1680         if (inode != NULL)
1681                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1682                                            i_gid_read(inode),
1683                                            i_projid_read(inode), 0,
1684                                            oh, obj, NULL, OSD_QID_BLK);
1685         RETURN(rc);
1686 }
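/*
 * A sketch of the declare/execute pairing this method belongs to
 * (hypothetical caller; exact dt_* wrapper signatures vary between
 * Lustre versions, but they dispatch to the dbo_* methods registered
 * at the end of this file):
 *
 *      th = dt_trans_create(env, dev);
 *      rc = dt_declare_write(env, dt, buf, pos, th);  // osd_declare_write()
 *      rc = dt_trans_start(env, dev, th);
 *      rc = dt_write(env, dt, buf, &pos, th, 0);      // osd_write()
 *      dt_trans_stop(env, dev, th);
 */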
1687
1688 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1689 {
1690         /* LU-2634: clear the extent format for fast symlink */
1691         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1692
1693         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1694         spin_lock(&inode->i_lock);
1695         LDISKFS_I(inode)->i_disksize = buflen;
1696         i_size_write(inode, buflen);
1697         spin_unlock(&inode->i_lock);
1698         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1699
1700         return 0;
1701 }
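/*
 * Clearing LDISKFS_INODE_EXTENTS above matters because i_data is reused
 * as the symlink body: with the flag set, the same area would be
 * interpreted as an extent tree header instead of the target path.
 */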
1702
1703 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
1704                              int write_NUL, loff_t *offs, handle_t *handle)
1705 {
1706         struct buffer_head *bh        = NULL;
1707         loff_t              offset    = *offs;
1708         loff_t              new_size  = i_size_read(inode);
1709         unsigned long       block;
1710         int                 blocksize = 1 << inode->i_blkbits;
1711         int                 err = 0;
1712         int                 size;
1713         int                 boffs;
1714         int                 dirty_inode = 0;
1715
1716         if (write_NUL) {
1717                 /*
1718                  * a long symlink write does not count the NUL terminator
1719                  * in bufsize; we write it out anyway, and the inode's
1720                  * file size does not count the NUL terminator either.
1721                  */
1722                 ((char *)buf)[bufsize] = '\0';
1723                 ++bufsize;
1724         }
1725
1726         while (bufsize > 0) {
1727                 int credits = handle->h_buffer_credits;
1728
1729                 if (bh)
1730                         brelse(bh);
1731
1732                 block = offset >> inode->i_blkbits;
1733                 boffs = offset & (blocksize - 1);
1734                 size = min(blocksize - boffs, bufsize);
1735                 bh = __ldiskfs_bread(handle, inode, block, 1);
1736                 if (IS_ERR_OR_NULL(bh)) {
1737                         if (bh == NULL) {
1738                                 err = -EIO;
1739                         } else {
1740                                 err = PTR_ERR(bh);
1741                                 bh = NULL;
1742                         }
1743
1744                         CERROR("%s: error reading offset %llu (block %lu, "
1745                                "size %d, offs %llu), credits %d/%d: rc = %d\n",
1746                                inode->i_sb->s_id, offset, block, bufsize, *offs,
1747                                credits, handle->h_buffer_credits, err);
1748                         break;
1749                 }
1750
1751                 err = ldiskfs_journal_get_write_access(handle, bh);
1752                 if (err) {
1753                         CERROR("journal_get_write_access() returned error %d\n",
1754                                err);
1755                         break;
1756                 }
1757                 LASSERTF(boffs + size <= bh->b_size,
1758                          "boffs %d size %d bh->b_size %lu\n",
1759                          boffs, size, (unsigned long)bh->b_size);
1760                 memcpy(bh->b_data + boffs, buf, size);
1761                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
1762                 if (err)
1763                         break;
1764
1765                 if (offset + size > new_size)
1766                         new_size = offset + size;
1767                 offset += size;
1768                 bufsize -= size;
1769                 buf += size;
1770         }
1771         if (bh)
1772                 brelse(bh);
1773
1774         if (write_NUL)
1775                 --new_size;
1776         /* correct in-core and on-disk sizes */
1777         if (new_size > i_size_read(inode)) {
1778                 spin_lock(&inode->i_lock);
1779                 if (new_size > i_size_read(inode))
1780                         i_size_write(inode, new_size);
1781                 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
1782                         LDISKFS_I(inode)->i_disksize = i_size_read(inode);
1783                         dirty_inode = 1;
1784                 }
1785                 spin_unlock(&inode->i_lock);
1786                 if (dirty_inode)
1787                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1788         }
1789
1790         if (err == 0)
1791                 *offs = offset;
1792         return err;
1793 }
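/*
 * Example of the write_NUL convention (an illustration, not a real
 * caller): for a long-symlink target "abc" the caller passes
 * bufsize = 3 and write_NUL = 1; four bytes "abc\0" are written, but
 * new_size is decremented before the i_size update, so the file size
 * stays 3, matching what readlink(2) reports.
 */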
1794
1795 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1796                          const struct lu_buf *buf, loff_t *pos,
1797                          struct thandle *handle, int ignore_quota)
1798 {
1799         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1800         struct osd_thandle      *oh;
1801         ssize_t                 result;
1802         int                     is_link;
1803
1804         LASSERT(dt_object_exists(dt));
1805
1806         LASSERT(handle != NULL);
1807         LASSERT(inode != NULL);
1808         ll_vfs_dq_init(inode);
1809
1810         /* XXX: don't check: one declared chunk can be used many times */
1811         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
1812
1813         oh = container_of(handle, struct osd_thandle, ot_super);
1814         LASSERT(oh->ot_handle->h_transaction != NULL);
1815         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
1816
1817         /* Write small symlink to inode body as we need to maintain correct
1818          * on-disk symlinks for ldiskfs.
1819          * Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
1820          * does not count it in.
1821          */
1822         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1823         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1824                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1825         else
1826                 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
1827                                                   buf->lb_len, is_link, pos,
1828                                                   oh->ot_handle);
1829         if (result == 0)
1830                 result = buf->lb_len;
1831
1832         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
1833
1834         return result;
1835 }
1836
1837 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1838                              __u64 start, __u64 end, struct thandle *th)
1839 {
1840         struct osd_thandle *oh;
1841         struct inode       *inode;
1842         int                 rc;
1843         ENTRY;
1844
1845         LASSERT(th);
1846         oh = container_of(th, struct osd_thandle, ot_super);
1847
1848         /*
1849          * we can't reserve credits for the whole truncate: it may need
1850          * to free too many blocks to fit into a single transaction.
1851          * instead we reserve credits to change i_size and to put the
1852          * inode onto the orphan list; if needed, the truncate itself
1853          * will extend or restart the transaction (see the credit
1854          * handling in osd_punch())
1855          */
1856         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1857                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1858
1859         inode = osd_dt_obj(dt)->oo_inode;
1860         LASSERT(inode);
1861
1862         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1863                                    i_projid_read(inode), 0, oh, osd_dt_obj(dt),
1864                                    NULL, OSD_QID_BLK);
1865         RETURN(rc);
1866 }
1867
1868 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1869                      __u64 start, __u64 end, struct thandle *th)
1870 {
1871         struct osd_thandle *oh;
1872         struct osd_object  *obj = osd_dt_obj(dt);
1873         struct inode       *inode = obj->oo_inode;
1874         handle_t           *h;
1875         tid_t               tid;
1876         int                rc = 0, rc2 = 0;
1877         ENTRY;
1878
1879         LASSERT(end == OBD_OBJECT_EOF);
1880         LASSERT(dt_object_exists(dt));
1881         LASSERT(osd_invariant(obj));
1882         LASSERT(inode != NULL);
1883         ll_vfs_dq_init(inode);
1884
1885         LASSERT(th);
1886         oh = container_of(th, struct osd_thandle, ot_super);
1887         LASSERT(oh->ot_handle->h_transaction != NULL);
1888
1889         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
1890
1891         tid = oh->ot_handle->h_transaction->t_tid;
1892
1893         spin_lock(&inode->i_lock);
1894         i_size_write(inode, start);
1895         spin_unlock(&inode->i_lock);
1896         ll_truncate_pagecache(inode, start);
1897 #ifdef HAVE_INODEOPS_TRUNCATE
1898         if (inode->i_op->truncate) {
1899                 inode->i_op->truncate(inode);
1900         } else
1901 #endif
1902                 ldiskfs_truncate(inode);
1903
1904         /*
1905          * For a partial-page truncate, flush the page to disk immediately to
1906          * avoid data corruption during direct disk write.  b=17397
1907          */
1908         if ((start & ~PAGE_MASK) != 0)
1909                 rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
1910
1911         h = journal_current_handle();
1912         LASSERT(h != NULL);
1913         LASSERT(h == oh->ot_handle);
1914
1915         /* do not check credits with osd_trans_exec_check(): the truncate
1916          * can restart the transaction internally, and we may restart it
1917          * once more below */
1918
1919         if (tid != h->h_transaction->t_tid) {
1920                 int credits = oh->ot_credits;
1921                 /*
1922                  * the transaction has changed during the truncate;
1923                  * restart the handle with our original credits
1924                  */
1925                 if (h->h_buffer_credits < credits) {
1926                         if (ldiskfs_journal_extend(h, credits))
1927                                 rc2 = ldiskfs_journal_restart(h, credits);
1928                 }
1929         }
1930
1931         RETURN(rc == 0 ? rc2 : rc);
1932 }
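/*
 * The credit top-up above follows the usual jbd2 extend-or-restart
 * pattern; a generic sketch (error handling elided):
 *
 *      if (handle->h_buffer_credits < needed) {
 *              if (ldiskfs_journal_extend(handle, needed))
 *                      // could not extend in place: commit what we
 *                      // have and continue in a fresh transaction
 *                      rc = ldiskfs_journal_restart(handle, needed);
 *      }
 */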
1933
1934 static int fiemap_check_ranges(struct inode *inode,
1935                                u64 start, u64 len, u64 *new_len)
1936 {
1937         loff_t maxbytes;
1938
1939         *new_len = len;
1940
1941         if (len == 0)
1942                 return -EINVAL;
1943
1944         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
1945                 maxbytes = inode->i_sb->s_maxbytes;
1946         else
1947                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
1948
1949         if (start > maxbytes)
1950                 return -EFBIG;
1951
1952         /*
1953          * Shrink request scope to what the fs can actually handle.
1954          */
1955         if (len > maxbytes || (maxbytes - len) < start)
1956                 *new_len = maxbytes - start;
1957
1958         return 0;
1959 }
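/*
 * Clamping example (hypothetical numbers): with maxbytes = 16TB, a
 * request of start = 16TB - 4KB, len = 1MB satisfies
 * (maxbytes - len) < start, so *new_len is clamped to
 * maxbytes - start = 4KB; a start beyond maxbytes fails with -EFBIG.
 */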
1960
1961 /* So that the fiemap access checks can't overflow on 32 bit machines. */
1962 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
1963
1964 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
1965                           struct fiemap *fm)
1966 {
1967         struct fiemap_extent_info fieinfo = {0, };
1968         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1969         u64 len;
1970         int rc;
1971
1972
1973         LASSERT(inode);
1974         if (inode->i_op->fiemap == NULL)
1975                 return -EOPNOTSUPP;
1976
1977         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
1978                 return -EINVAL;
1979
1980         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
1981         if (rc)
1982                 return rc;
1983
1984         fieinfo.fi_flags = fm->fm_flags;
1985         fieinfo.fi_extents_max = fm->fm_extent_count;
1986         fieinfo.fi_extents_start = fm->fm_extents;
1987
1988         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
1989                 filemap_write_and_wait(inode->i_mapping);
1990
1991         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
1992         fm->fm_flags = fieinfo.fi_flags;
1993         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
1994
1995         return rc;
1996 }
1997
1998 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
1999                        __u64 start, __u64 end, enum lu_ladvise_type advice)
2000 {
2001         int              rc = 0;
2002         struct inode    *inode = osd_dt_obj(dt)->oo_inode;
2003         ENTRY;
2004
2005         switch (advice) {
2006         case LU_LADVISE_DONTNEED:
2007                 if (end == 0)
2008                         break;
2009                 invalidate_mapping_pages(inode->i_mapping,
2010                                          start >> PAGE_CACHE_SHIFT,
2011                                          (end - 1) >> PAGE_CACHE_SHIFT);
2012                 break;
2013         default:
2014                 rc = -ENOTSUPP;
2015                 break;
2016         }
2017
2018         RETURN(rc);
2019 }
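/*
 * Page-index arithmetic illustrated (assuming 4KB pages):
 * LU_LADVISE_DONTNEED with start = 0 and end = 8192 invalidates page
 * indices 0..1, since (end - 1) >> PAGE_CACHE_SHIFT == 1; end == 0 is
 * treated as an empty range and skipped by the check above.
 */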
2020
2021 /*
2022  * in some cases we need declare methods for objects that are still
2023  * being created, e.g. when we create a symlink
2024  */
2025 const struct dt_body_operations osd_body_ops_new = {
2026         .dbo_declare_write = osd_declare_write,
2027 };
2028
2029 const struct dt_body_operations osd_body_ops = {
2030         .dbo_read                       = osd_read,
2031         .dbo_declare_write              = osd_declare_write,
2032         .dbo_write                      = osd_write,
2033         .dbo_bufs_get                   = osd_bufs_get,
2034         .dbo_bufs_put                   = osd_bufs_put,
2035         .dbo_write_prep                 = osd_write_prep,
2036         .dbo_declare_write_commit       = osd_declare_write_commit,
2037         .dbo_write_commit               = osd_write_commit,
2038         .dbo_read_prep                  = osd_read_prep,
2039         .dbo_declare_punch              = osd_declare_punch,
2040         .dbo_punch                      = osd_punch,
2041         .dbo_fiemap_get                 = osd_fiemap_get,
2042         .dbo_ladvise                    = osd_ladvise,
2043 };