LU-10472 osd-ldiskfs: add T10PI support for BIO
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 * OBD_FAIL_CHECK
 */
#include <obd_support.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>

static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = ktime_set(0, 0);
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

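        /*
         * Grow the preallocated page/block arrays in power-of-two steps
         * rather than to the exact size, so a series of slightly larger
         * requests does not realloc every time.  Illustrative example
         * (assuming 4KB PAGE_SIZE): for pages = 300 the loop below stops
         * at i = 512, i.e. room for 2MB of I/O.
         */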
        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)

static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;
}

void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS + rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
                                      ktime_to_ms(iobuf->dr_elapsed));
        }
}

#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
# ifdef HAVE_BI_STATUS
        int error = bio->bi_status;
# else
        int error = bio->bi_error;
# endif
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
        struct osd_iobuf *iobuf = bio->bi_private;
        int iter;
        struct bio_vec *bvl;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to <https://jira.hpdd.intel.com/> , along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps).  Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, "
#ifdef HAVE_BI_RW
                       "bi_rw: %lu,"
#else
                       "bi_opf: %u,"
#endif
                       "bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p,"
                       "bi_cnt: %d, bi_private: %p\n", bio->bi_next,
                        (unsigned long)bio->bi_flags,
#ifdef HAVE_BI_RW
                        bio->bi_rw,
#else
                        bio->bi_opf,
#endif
                        bio->bi_vcnt, bio_idx(bio),
                        bio_sectors(bio) << 9, bio->bi_end_io,
#ifdef HAVE_BI_CNT
                        atomic_read(&bio->bi_cnt),
#else
                        atomic_read(&bio->__bi_cnt),
#endif
                        bio->bi_private);
                return;
        }

        /* the check is outside of the cycle for performance reasons -bzzz */
        if (!bio_data_dir(bio)) {
                bio_for_each_segment_all(bvl, bio, iter) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl_to_page(bvl));
                        LASSERT(PageLocked(bvl_to_page(bvl)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * set dr_elapsed before dr_numreqs turns to 0, otherwise
         * it's possible that service thread will see dr_numreqs
         * is zero, but dr_elapsed is not set yet, leading to lost
         * data in this processing and an assertion in a subsequent
         * call to OSD.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                ktime_t now = ktime_get();

                iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
}

static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device    *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
#else
        bio->bi_opf |= rw;
        submit_bio(bio);
#endif
}

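/*
 * A new fragment can be appended to an existing bio only when it starts
 * at the sector immediately following the bio's current end, i.e. the
 * resulting on-disk extent stays contiguous: a bio covering sectors
 * 8..15 can absorb a fragment starting at sector 16, but not one at 24.
 */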
static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}

/*
 * This function will change the data written, thus it should only be
 * used when testing the data integrity feature
 */
static void bio_integrity_fault_inject(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;
        void *kaddr;
        char *addr;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                kaddr = kmap(page);
                addr = kaddr;
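                /* inverting the first byte of the first segment is enough
                 * to make the T10-PI guard tag computed over the data
                 * mismatch on verification */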
                *addr = ~(*addr);
                kunmap(page);
                break;
        }
}

static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page **pages = iobuf->dr_pages;
        int npages = iobuf->dr_npages;
        sector_t *blocks = iobuf->dr_blocks;
        int total_blocks = npages * blocks_per_page;
        int sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int blocksize = inode->i_sb->s_blocksize;
        struct bio *bio = NULL;
        struct page *page;
        unsigned int page_offset;
        sector_t sector;
        int nblocks;
        int block_idx;
        int page_idx;
        int i;
        int rc = 0;
        bool fault_inject;
        DECLARE_PLUG(plug);
        ENTRY;

        fault_inject = OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
        LASSERT(iobuf->dr_npages == npages);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = ktime_get();

        blk_start_plug(&plug);
        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

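                        /*
                         * Convert the filesystem block number into a 512-byte
                         * sector number: with 4KB blocks sector_bits is
                         * 12 - 9 = 3, so block N starts at sector N * 8.
                         */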
                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q = bio_get_queue(bio);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       0, queue_max_hw_segments(q));
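                                /* attach the T10-PI integrity payload before
                                 * submission; if that fails the whole I/O is
                                 * aborted with -EIO */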
                                if (bio_integrity_enabled(bio)) {
                                        if (bio_integrity_prep(bio)) {
                                                bio_put(bio);
                                                rc = -EIO;
                                                goto out;
                                        }
                                        if (unlikely(fault_inject))
                                                bio_integrity_fault_inject(bio);
                                }

                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio_set_dev(bio, inode->i_sb->s_bdev);
                        bio_set_sector(bio, sector);
#ifdef HAVE_BI_RW
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
#else
                        bio->bi_opf = (iobuf->dr_rw == 0) ? READ : WRITE;
#endif
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                if (bio_integrity_enabled(bio)) {
                        if (bio_integrity_prep(bio)) {
                                bio_put(bio);
                                rc = -EIO;
                                goto out;
                        }
                        if (unlikely(fault_inject))
                                bio_integrity_fault_inject(bio);
                }

                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

out:
        blk_finish_plug(&plug);

        /* in order to achieve better IO throughput, we don't wait for write
         * completion here. instead we proceed with transaction commit in
         * parallel and wait for IO completion once the transaction is stopped;
         * see osd_trans_stop() for more details -bzzz */
        if (iobuf->dr_rw == 0 || fault_inject) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}

static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb)
{
        ENTRY;

        *nrpages = 0;

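        /*
         * Split the byte range into page-sized niobufs.  Illustrative
         * example (assuming 4KB pages): offset = 6144, len = 8192 yields
         * three entries: 2KB at page offset 2048, a full 4KB page, and
         * 2KB at page offset 0.
         */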
        while (len > 0) {
                int poff = offset & (PAGE_SIZE - 1);
                int plen = PAGE_SIZE - poff;

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(0);
}

static struct page *osd_get_page(struct dt_object *dt, loff_t offset,
                                 gfp_t gfp_mask)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page *page;

        LASSERT(inode);

        page = find_or_create_page(inode->i_mapping, offset >> PAGE_SHIFT,
                                   gfp_mask);

        if (unlikely(page == NULL))
                lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);

        return page;
}

/*
 * The following "locks" are involved:
 * journal_start
 * i_mutex
 * page lock
 *
 * osd write path:
 *  - lock page(s)
 *  - journal_start
 *  - truncate_sem
 *
 * ext4 vmtruncate:
 *  - lock pages, unlock
 *  - journal_start
 *  - lock partial page
 *  - i_data_sem
 *
 */

/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param lnb           array of pages undergoing IO
 * \param npages        number of pages in \a lnb
 *
 * \retval 0            always
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct pagevec pvec;
        int i;

#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
        pagevec_init(&pvec);
#else
        pagevec_init(&pvec, 0);
#endif

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_page == NULL)
                        continue;
                LASSERT(PageLocked(lnb[i].lnb_page));
                unlock_page(lnb[i].lnb_page);
                if (pagevec_add(&pvec, lnb[i].lnb_page) == 0)
                        pagevec_release(&pvec);
                dt_object_put(env, dt);
                lnb[i].lnb_page = NULL;
        }

        /* Release any partial pagevec */
        pagevec_release(&pvec);

        RETURN(0);
}

/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for the same
 * region, but partial-page truncate can leave dirty pages in the cache
 * for ldiskfs.  It's possible that writeout on such a page is in progress
 * when we access it.  It's also possible that during this writeout we put
 * new (partial) data into the page, but then won't be able to proceed in
 * filter_commitrw_write().  Therefore, just wait for writeout completion,
 * as it should be rare enough.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param pos           byte offset of IO start
 * \param len           number of bytes of IO
 * \param lnb           array of extents undergoing IO
 * \param rw            read or write operation, and other flags
 * \param capa          capabilities
 *
 * \retval pages        (zero or more) loaded successfully
 * \retval -ENOMEM      on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        enum dt_bufs_type rw)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int npages, i, rc = 0;
        gfp_t gfp_mask;

        LASSERT(obj->oo_inode);

        osd_map_remote_to_local(pos, len, &npages, lnb);

        /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
        gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
                                             GFP_HIGHUSER;
        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(dt, lnb->lnb_file_offset,
                                             gfp_mask);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                wait_on_page_writeback(lnb->lnb_page);
                BUG_ON(PageWriteback(lnb->lnb_page));

                lu_object_get(&dt->do_lu);
        }

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}

#ifndef HAVE_LDISKFS_MAP_BLOCKS

#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif

struct bpointers {
        sector_t *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

static long ldiskfs_ext_find_goal(struct inode *inode,
                                  struct ldiskfs_ext_path *path,
                                  unsigned long block, int *aflags)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ldiskfs_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ldiskfs_ext_pblock(ex) +
                                (block - le32_to_cpu(ex->ee_block));

                /* it looks like the index is empty;
                 * try to find a goal starting from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}

static unsigned long new_blocks(handle_t *handle, struct inode *inode,
                                struct ldiskfs_ext_path *path,
                                unsigned long block, unsigned long *count,
                                int *err)
{
        struct ldiskfs_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = LDISKFS_MB_HINT_DATA;
        pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}

static int ldiskfs_ext_new_extent_cb(struct inode *inode,
                                     struct ldiskfs_ext_path *path,
                                     struct ldiskfs_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                     struct ldiskfs_extent *ex,
#endif
                                     void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct ldiskfs_extent nex;
        unsigned long pblock = 0;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
        if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
#endif
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = LDISKFS_I(inode)->i_ext_generation;
        count = ldiskfs_ext_calc_credits_for_insert(inode, path);

        handle = osd_journal_start(inode, LDISKFS_HT_MISC,
                                   count + LDISKFS_ALLOC_NEEDED + 1);
        if (IS_ERR(handle)) {
                return PTR_ERR(handle);
        }

        if (tgen != LDISKFS_I(inode)->i_ext_generation) {
                /* the tree has changed, so the path can be invalid at the moment */
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback is not
         * protected by i_data_sem as a whole, so we patch it to store the
         * generation in the path and verify here that the tree hasn't changed */
        down_write((&LDISKFS_I(inode)->i_data_sem));

        /* validate extent, make sure the extent tree has not changed */
        if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&LDISKFS_I(inode)->i_data_sem);
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        BUG_ON(count > cex->ec_len);

        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ldiskfs_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
        if (err) {
                /* free data blocks we just allocated */
                /* it's not a good idea to call discard here directly,
                 * but otherwise we'd need to call it on every free() */
                ldiskfs_discard_preallocations(inode);
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ldiskfs_free_blocks(handle, inode, NULL,
                                    ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#else
                ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * By storing the length of the extent we actually inserted,
         * we ask ldiskfs_ext_walk_space() to continue scanning after
         * that block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ldiskfs_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&LDISKFS_I(inode)->i_data_sem));
        ldiskfs_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        CERROR("current extent: %u/%u/%llu %d\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start,
                                cex->ec_type);
#else
                        CERROR("current extent: %u/%u/%llu\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start);
#endif
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (pblock != 0) {
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                unmap_underlying_metadata(inode->i_sb->s_bdev,
                                                          *(bp->blocks));
#else
                                clean_bdev_aliases(inode->i_sb->s_bdev,
                                                   *(bp->blocks), 1);
#endif
                        }
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index,
                                   int clen, sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct bpointers bp;
        int err;

        if (index + clen >= inode->i_sb->s_maxbytes >> PAGE_SHIFT)
                return -EFBIG;

        bp.blocks = blocks;
        bp.start = index * blocks_per_page;
        bp.init_num = bp.num = clen * blocks_per_page;
        bp.create = create;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               bp.start, bp.start + bp.num - 1, (unsigned)inode->i_ino);

        err = ldiskfs_ext_walk_space(inode, bp.start, bp.num,
                                     ldiskfs_ext_new_extent_cb, &bp);
        ldiskfs_ext_invalidate_cache(inode);

        return err;
}

static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode,
                                          struct page **page, int pages,
                                          sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        pgoff_t bitmap_max_page_index;
        sector_t *b;
        int rc = 0, i;

        bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
                                PAGE_SHIFT;
        for (i = 0, b = blocks; i < pages; i++, page++) {
                if ((*page)->index + 1 >= bitmap_max_page_index) {
                        rc = -EFBIG;
                        break;
                }
                rc = ldiskfs_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %llu create %d: rc %d\n",
                               inode->i_ino,
                               (unsigned long long)*b, create, rc);
                        break;
                }
                b += blocks_per_page;
        }
        return rc;
}

static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode,
                                           struct page **page,
                                           int pages, sector_t *blocks,
                                           int create)
{
        int rc = 0, i = 0, clen = 0;
        struct page *fp = NULL;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are sorted already, so we just have to find
         * contiguous chunks and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += clen * (PAGE_SIZE >> inode->i_blkbits);
        }

        if (fp)
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);

cleanup:
        return rc;
}

static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int rc;

        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);

        return rc;
}
#else
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;
        handle_t *handle = NULL;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        if (create) {
                create = LDISKFS_GET_BLOCKS_CREATE;
                handle = ldiskfs_journal_current_handle();
                LASSERT(handle != NULL);
                rc = osd_attach_jinode(inode);
                if (rc)
                        return rc;
        }
        /* pages are sorted already, so we just have to find
         * contiguous chunks and process them properly */
        while (i < pages) {
                long blen, total = 0;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
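                /*
                 * ldiskfs_map_blocks() may map fewer blocks than requested,
                 * so loop via cont_map until the whole extent is covered;
                 * rc == 0 with create == 0 means a hole at that offset.
                 */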
cont_map:
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;
                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                } else {
                                        *(blocks + total) = map.m_pblk + c;
                                        /* unmap any possible underlying
                                         * metadata from the block device
                                         * mapping.  bug 6998. */
                                        if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                            create)
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                                unmap_underlying_metadata(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c);
#else
                                                clean_bdev_aliases(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c, 1);
#endif
                                }
                        }
                        rc = 0;
                }
                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        return rc;
}
#endif /* HAVE_LDISKFS_MAP_BLOCKS */

static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        ktime_t start;
        ktime_t end;
        s64 timediff;
        ssize_t                 isize;
        __s64                   maxidx;
        int                     rc = 0;
        int                     i;
        int                     cache = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        start = ktime_get();
        for (i = 0; i < npages; i++) {

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);

                /*
                 * till commit, the content of the page is undefined;
                 * we'll set it uptodate once the bulk transfer is done.
                 * otherwise subsequent reads can access unstable data.
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_SIZE)
                        continue;

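                /*
                 * Partial page: if it lies within the current file size,
                 * queue it for a read so the untouched part keeps its old
                 * content; beyond EOF just zero the regions outside the
                 * incoming fragment.
                 */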
                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}

struct osd_fextent {
        sector_t        start;
        sector_t        end;
        unsigned int    mapped:1;
};

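/*
 * Check if a block is allocated or not: issue a single-extent FIEMAP
 * request at the given offset and cache the returned extent, so that
 * subsequent lookups falling into the same extent avoid another
 * fiemap call.
 */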
static int osd_is_mapped(struct dt_object *dt, __u64 offset,
                         struct osd_fextent *cached_extent)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        sector_t block = offset >> inode->i_blkbits;
        sector_t start;
        struct fiemap_extent_info fei = { 0 };
        struct fiemap_extent fe = { 0 };
        mm_segment_t saved_fs;
        int rc;

        if (block >= cached_extent->start && block < cached_extent->end)
                return cached_extent->mapped;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
                return 0;

        fei.fi_extents_max = 1;
        fei.fi_extents_start = &fe;

        saved_fs = get_fs();
        set_fs(get_ds());
        rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
        set_fs(saved_fs);
        if (rc != 0)
                return 0;

        start = fe.fe_logical >> inode->i_blkbits;

        if (start > block) {
                cached_extent->start = block;
                cached_extent->end = start;
                cached_extent->mapped = 0;
        } else {
                cached_extent->start = start;
                cached_extent->end = (fe.fe_logical + fe.fe_length) >>
                                      inode->i_blkbits;
                cached_extent->mapped = 1;
        }

        return cached_extent->mapped;
}

static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        int                     extents = 1;
        int                     depth;
        int                     i;
        int                     newblocks;
        int                     rc = 0;
        int                     flags = 0;
        int                     credits = 0;
        long long               quota_space = 0;
        struct osd_fextent      extent = { 0 };
        enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        newblocks = npages;

        /* calculate number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                if (i && lnb[i].lnb_file_offset !=
                    lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
                        extents++;

                if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
                        lnb[i].lnb_flags |= OBD_BRW_MAPPED;
                else
                        quota_space += PAGE_SIZE;

                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the checking
                 * for whether page is from cache can be simplified as:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        declare_flags |= OSD_QID_FORCE;
        }

        /*
         * each extent can go into new leaf causing a split
         * 5 is max tree depth: inode + 4 index blocks
         * with blockmaps, depth is 3 at most
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * many concurrent threads may grow the tree by the time
                 * our transaction starts, so consider 2 a minimum depth
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * extents;
        }
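        /*
         * Illustrative example: a fully contiguous 256-page write to an
         * extent-mapped file with ext_depth() == 1 gives extents = 1 and
         * depth = 2, i.e. credits = 1 (inode) + 2 * 2 * 1 = 5 before the
         * per-group bitmap/gd credits below are added.
         */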

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);

        /* each new block can go in different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                credits += newblocks;

        /* we can't dirty more gd blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                credits += newblocks;

        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* make sure the over quota flags were not set */
        lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), quota_space, oh,
                                   osd_dt_obj(dt), &flags, declare_flags);

        /* we need only to store the overquota flags in the first lnb for
         * now; once we support multiple-object BRW, this code will need
         * to be revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
        if (flags & QUOTA_FL_OVER_PRJQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;

        RETURN(rc);
}

static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *thandle)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
        loff_t isize;
        int rc = 0, i;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 1, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        ll_vfs_dq_init(inode);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_rc == -ENOSPC &&
                    (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
                        /* Allow the write to proceed if overwriting an
                         * existing block */
                        lnb[i].lnb_rc = 0;
                }

                if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
                               lnb[i].lnb_rc);
                        LASSERT(lnb[i].lnb_page);
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                        continue;
                }

                LASSERT(PageLocked(lnb[i].lnb_page));
                LASSERT(!PageWriteback(lnb[i].lnb_page));

                if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
                        isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;

                /*
                 * Since write and truncate are serialized by oo_sem, even
                 * partial-page truncate should not leave dirty pages in the
                 * page cache.
                 */
                LASSERT(!PageDirty(lnb[i].lnb_page));

                SetPageUptodate(lnb[i].lnb_page);

                osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
        }

        osd_trans_exec_op(env, thandle, OSD_OT_WRITE);

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 1);
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
        }

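        /*
         * Grow both in-core and on-disk i_size under i_lock before the
         * data is submitted, so a racing stat or read sees a size that is
         * consistent with the blocks committed in this transaction.
         */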
        if (likely(rc == 0)) {
                spin_lock(&inode->i_lock);
                if (isize > i_size_read(inode)) {
                        i_size_write(inode, isize);
                        LDISKFS_I(inode)->i_disksize = isize;
                        spin_unlock(&inode->i_lock);
                        ll_dirty_inode(inode, I_DIRTY_DATASYNC);
                } else {
                        spin_unlock(&inode->i_lock);
                }

                rc = osd_do_bio(osd, inode, iobuf);
1395         /* we don't do stats here as in the read path because
1396          * write is async: we'll do this in osd_bufs_put() */
1397         } else {
1398                 osd_fini_iobuf(osd, iobuf);
1399         }
1400
1401         osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
1402
1403         if (unlikely(rc != 0)) {
1404                 /* if write fails, we should drop pages from the cache */
1405                 for (i = 0; i < npages; i++) {
1406                         if (lnb[i].lnb_page == NULL)
1407                                 continue;
1408                         LASSERT(PageLocked(lnb[i].lnb_page));
1409                         generic_error_remove_page(inode->i_mapping,
1410                                                   lnb[i].lnb_page);
1411                 }
1412         }
1413
1414         RETURN(rc);
1415 }
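
/*
 * Editorial note (not part of the original source): the size update in
 * the success path above changes both the in-core i_size and ldiskfs's
 * i_disksize under i_lock before the bios are submitted; ll_dirty_inode()
 * then marks the inode dirty so that the enlarged size reaches the
 * on-disk inode with this transaction rather than only at writeback.
 */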
1416
1417 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1418                          struct niobuf_local *lnb, int npages)
1419 {
1420         struct osd_thread_info *oti = osd_oti_get(env);
1421         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1422         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1423         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1424         int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
1425         ktime_t start, end;
1426         s64 timediff;
1427         loff_t isize;
1428
1429         LASSERT(inode);
1430
1431         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1432         if (unlikely(rc != 0))
1433                 RETURN(rc);
1434
1435         isize = i_size_read(inode);
1436
1437         if (osd->od_read_cache)
1438                 cache = 1;
1439         if (isize > osd->od_readcache_max_filesize)
1440                 cache = 0;
1441
1442         start = ktime_get();
1443         for (i = 0; i < npages; i++) {
1444
1445                 if (isize <= lnb[i].lnb_file_offset)
1446                         /* If there's no more data, abort early.
1447                          * lnb->lnb_rc == 0, so it's easy to detect later. */
1448                         break;
1449
1450                 if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len)
1451                         lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
1452                 else
1453                         lnb[i].lnb_rc = lnb[i].lnb_len;
1454
1455                 /* Bypass disk read if fail_loc is set properly */
1456                 if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
1457                         SetPageUptodate(lnb[i].lnb_page);
1458
1459                 if (PageUptodate(lnb[i].lnb_page)) {
1460                         cache_hits++;
1461                 } else {
1462                         cache_misses++;
1463                         osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
1464                 }
1465
1466                 if (cache == 0)
1467                         generic_error_remove_page(inode->i_mapping,
1468                                                   lnb[i].lnb_page);
1469         }
1470         end = ktime_get();
1471         timediff = ktime_us_delta(end, start);
1472         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1473
1474         if (cache_hits != 0)
1475                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1476                                     cache_hits);
1477         if (cache_misses != 0)
1478                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1479                                     cache_misses);
1480         if (cache_hits + cache_misses != 0)
1481                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1482                                     cache_hits + cache_misses);
1483
1484         if (iobuf->dr_npages) {
1485                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1486                                                  iobuf->dr_npages,
1487                                                  iobuf->dr_blocks, 0);
1488                 if (likely(rc == 0))
1489                         rc = osd_do_bio(osd, inode, iobuf);
1490                 /* IO stats will be done in osd_bufs_put() */
1491         }
1492
1493         RETURN(rc);
1494 }
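
/*
 * Editorial note (not part of the original source): the "cache" flag
 * above implements the server-side read cache policy.  Pages stay in
 * the page cache only when od_read_cache is enabled and the file size
 * does not exceed od_readcache_max_filesize; otherwise each page is
 * dropped via generic_error_remove_page() as soon as it has been
 * examined, so large streaming reads do not evict more useful data.
 */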
1495
1496 /*
1497  * XXX: Another layering violation for now.
1498  *
1499  * We don't want to use the generic ->f_op->read/write methods, because they
1500  *
1501  *         - serialize on ->i_sem, and
1502  *
1503  *         - do a lot of extra work like balance_dirty_pages(),
1504  *
1505  * which doesn't work for globally shared files like /last_rcvd.
1506  */
1507 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1508 {
1509         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1510
1511         memcpy(buffer, (char *)ei->i_data, buflen);
1512
1513         return buflen;
1514 }
1515
1516 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1517 {
1518         struct buffer_head *bh;
1519         unsigned long block;
1520         int osize;
1521         int blocksize;
1522         int csize;
1523         int boffs;
1524
1525         /* prevent reading after eof */
1526         spin_lock(&inode->i_lock);
1527         if (i_size_read(inode) < *offs + size) {
1528                 loff_t diff = i_size_read(inode) - *offs;
1529                 spin_unlock(&inode->i_lock);
1530                 if (diff < 0) {
1531                         CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
1532                                i_size_read(inode), *offs);
1533                         return -EBADR;
1534                 } else if (diff == 0) {
1535                         return 0;
1536                 } else {
1537                         size = diff;
1538                 }
1539         } else {
1540                 spin_unlock(&inode->i_lock);
1541         }
1542
1543         blocksize = 1 << inode->i_blkbits;
1544         osize = size;
1545         while (size > 0) {
1546                 block = *offs >> inode->i_blkbits;
1547                 boffs = *offs & (blocksize - 1);
1548                 csize = min(blocksize - boffs, size);
1549                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1550                 if (IS_ERR(bh)) {
1551                         CERROR("%s: can't read %u@%llu on ino %lu: "
1552                                "rc = %ld\n", osd_ino2name(inode),
1553                                csize, *offs, inode->i_ino,
1554                                PTR_ERR(bh));
1555                         return PTR_ERR(bh);
1556                 }
1557
1558                 if (bh != NULL) {
1559                         memcpy(buf, bh->b_data + boffs, csize);
1560                         brelse(bh);
1561                 } else {
1562                         memset(buf, 0, csize);
1563                 }
1564
1565                 *offs += csize;
1566                 buf += csize;
1567                 size -= csize;
1568         }
1569         return osize;
1570 }
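
#if 0	/* Editorial sketch (not built, not part of the original source):
	 * how a caller might use osd_ldiskfs_read() for a fixed-size
	 * record.  "struct example_rec" is a hypothetical type and
	 * -ENODATA an illustrative choice.  A positive return is the
	 * byte count actually copied (short when i_size is reached),
	 * 0 means *offs sits exactly at EOF, and -EBADR means it lies
	 * beyond EOF. */
static int example_read_rec(struct inode *inode, struct example_rec *rec,
			    loff_t off)
{
	loff_t pos = off;
	int rc;

	rc = osd_ldiskfs_read(inode, rec, sizeof(*rec), &pos);
	if (rc < 0)
		return rc;		/* e.g. -EBADR: offset past EOF */
	if (rc < (int)sizeof(*rec))
		return -ENODATA;	/* short read: truncated by i_size */
	/* pos was advanced by the bytes actually read */
	return 0;
}
#endif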
1571
1572 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1573                         struct lu_buf *buf, loff_t *pos)
1574 {
1575         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1576         int           rc;
1577
1578         /* Read small symlink from inode body as we need to maintain correct
1579          * on-disk symlinks for ldiskfs.
1580          */
1581         if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1582             (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1583                 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1584         else
1585                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1586
1587         return rc;
1588 }
1589
1590 static inline int osd_extents_enabled(struct super_block *sb,
1591                                       struct inode *inode)
1592 {
1593         if (inode != NULL) {
1594                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1595                         return 1;
1596         } else if (ldiskfs_has_feature_extents(sb)) {
1597                 return 1;
1598         }
1599         return 0;
1600 }
1601
1602 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1603                            const loff_t size, const loff_t pos,
1604                            const int blocks)
1605 {
1606         int credits, bits, bs, i;
1607
1608         bits = sb->s_blocksize_bits;
1609         bs = 1 << bits;
1610
1611         /* legacy blockmap: 3 levels * 3 (bitmap, gd, the block itself);
1612          * we do not expect blockmaps on large files,
1613          * so let's shrink it to 2 levels (4GB files) */
1614
1615         /* this is default reservation: 2 levels */
1616         credits = (blocks + 2) * 3;
1617
1618         /* actual offset is unknown, hard to optimize */
1619         if (pos == -1)
1620                 return credits;
1621
1622         /* now check a few specific cases we can optimize */
1623         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1624                 /* no indirects */
1625                 credits = blocks;
1626                 /* allocate if not allocated */
1627                 if (inode == NULL) {
1628                         credits += blocks * 2;
1629                         return credits;
1630                 }
1631                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1632                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1633                         if (LDISKFS_I(inode)->i_data[i] == 0)
1634                                 credits += 2;
1635                 }
1636         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1637                 /* single indirect */
1638                 credits = blocks * 3;
1639                 if (inode == NULL ||
1640                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1641                         credits += 3;
1642                 else
1643                         /* The indirect block may be modified. */
1644                         credits += 1;
1645         }
1646
1647         return credits;
1648 }
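
/*
 * Editorial worked example (not part of the original source) for the
 * block-mapped credit arithmetic above, assuming bs = 4096 (bits = 12)
 * and LDISKFS_NDIR_BLOCKS = 12:
 *
 *   - an 8KB write at pos = 0 into a not-yet-created file stays in the
 *     direct blocks: credits = blocks + blocks * 2 = 2 + 4 = 6;
 *   - the same write when both direct slots are already allocated:
 *     credits = blocks = 2;
 *   - an 8KB write at pos = 1MB lands in the single-indirect range
 *     (pos + size <= (12 + 1024) * 4096): credits = blocks * 3 = 6,
 *     plus 3 if the indirect block must be allocated, or 1 if it only
 *     needs to be modified.
 */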
1649
1650 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1651                                  const struct lu_buf *buf, loff_t _pos,
1652                                  struct thandle *handle)
1653 {
1654         struct osd_object  *obj  = osd_dt_obj(dt);
1655         struct inode       *inode = obj->oo_inode;
1656         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1657         struct osd_thandle *oh;
1658         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1659         int                 bits, bs;
1660         int                 depth, size;
1661         loff_t              pos;
1662         ENTRY;
1663
1664         LASSERT(buf != NULL);
1665         LASSERT(handle != NULL);
1666
1667         oh = container_of0(handle, struct osd_thandle, ot_super);
1668         LASSERT(oh->ot_handle == NULL);
1669
1670         size = buf->lb_len;
1671         bits = sb->s_blocksize_bits;
1672         bs = 1 << bits;
1673
1674         if (_pos == -1) {
1675                 /* if this is an append, then we
1676                  * should expect a cross-block record */
1677                 pos = 0;
1678         } else {
1679                 pos = _pos;
1680         }
1681
1682         /* blocks to modify */
1683         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1684         LASSERT(blocks > 0);
1685
1686         if (inode != NULL && _pos != -1) {
1687                 /* object size in blocks */
1688                 est = (i_size_read(inode) + bs - 1) >> bits;
1689                 allocated = inode->i_blocks >> (bits - 9);
1690                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1691                         /* looks like an overwrite, no need to modify tree */
1692                         credits = blocks;
1693                         /* no need to modify i_size */
1694                         goto out;
1695                 }
1696         }
1697
1698         if (osd_extents_enabled(sb, inode)) {
1699                 /*
1700                  * many concurrent threads may grow the tree by the time
1701                  * our transaction starts, so consider 2 a minimum depth.
1702                  * for every level we may need to allocate a new block
1703                  * and take some entries from the old one: 3 blocks
1704                  * to allocate (bitmap, gd, the block itself) + the old
1705                  * block = 4 per level.
1706                  */
1707                 depth = inode != NULL ? ext_depth(inode) : 0;
1708                 depth = max(depth, 1) + 1;
1709                 credits = depth;
1710                 /* if not an append, then a split may need to modify
1711                  * existing blocks, moving entries into the new ones */
1712                 if (_pos != -1)
1713                         credits += depth;
1714                 /* blocks to store data: bitmap,gd,itself */
1715                 credits += blocks * 3;
1716         } else {
1717                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1718         }
1719         /* if the inode is created as part of this transaction,
1720          * then it's already counted by the creation method */
1721         if (inode != NULL)
1722                 credits++;
1723
1724 out:
1725
1726         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1727
1728         /* dt_declare_write() is usually called for system objects, such
1729          * as llog or last_rcvd files. We needn't enforce quota on those
1730          * objects, so always set lqi_space to 0. */
1731         if (inode != NULL)
1732                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1733                                            i_gid_read(inode),
1734                                            i_projid_read(inode), 0,
1735                                            oh, obj, NULL, OSD_QID_BLK);
1736         RETURN(rc);
1737 }
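
/*
 * Editorial worked example (not part of the original source) for the
 * extent-mapped branch of osd_declare_write() above.  Take an 8KB
 * write (blocks = 2, bs = 4096) into a file that does not exist yet,
 * so inode == NULL and depth = max(0, 1) + 1 = 2:
 *
 *   - append (_pos == -1):   credits = 2 + 2 * 3 = 8
 *   - positional write:      credits = 2 + 2 + 2 * 3 = 10
 *
 * An existing inode adds one more credit for the inode block, and
 * osd_declare_inode_qid() reserves the quota credits on top of that.
 */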
1738
1739 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1740 {
1741         /* LU-2634: clear the extent format for fast symlink */
1742         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1743
1744         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1745         spin_lock(&inode->i_lock);
1746         LDISKFS_I(inode)->i_disksize = buflen;
1747         i_size_write(inode, buflen);
1748         spin_unlock(&inode->i_lock);
1749         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1750
1751         return 0;
1752 }
1753
1754 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
1755                              int write_NUL, loff_t *offs, handle_t *handle)
1756 {
1757         struct buffer_head *bh        = NULL;
1758         loff_t              offset    = *offs;
1759         loff_t              new_size  = i_size_read(inode);
1760         unsigned long       block;
1761         int                 blocksize = 1 << inode->i_blkbits;
1762         int                 err = 0;
1763         int                 size;
1764         int                 boffs;
1765         int                 dirty_inode = 0;
1766
1767         if (write_NUL) {
1768                 /*
1769                  * a long symlink write does not count the NUL terminator
1770                  * in bufsize, so we write it ourselves; the inode's
1771                  * file size does not count the NUL terminator either.
1772                  */
1773                 ((char *)buf)[bufsize] = '\0';
1774                 ++bufsize;
1775         }
1776
1777         while (bufsize > 0) {
1778                 int credits = handle->h_buffer_credits;
1779
1780                 if (bh)
1781                         brelse(bh);
1782
1783                 block = offset >> inode->i_blkbits;
1784                 boffs = offset & (blocksize - 1);
1785                 size = min(blocksize - boffs, bufsize);
1786                 bh = __ldiskfs_bread(handle, inode, block, 1);
1787                 if (IS_ERR_OR_NULL(bh)) {
1788                         if (bh == NULL) {
1789                                 err = -EIO;
1790                         } else {
1791                                 err = PTR_ERR(bh);
1792                                 bh = NULL;
1793                         }
1794
1795                         CERROR("%s: error reading offset %llu (block %lu, "
1796                                "size %d, offs %llu), credits %d/%d: rc = %d\n",
1797                                inode->i_sb->s_id, offset, block, bufsize, *offs,
1798                                credits, handle->h_buffer_credits, err);
1799                         break;
1800                 }
1801
1802                 err = ldiskfs_journal_get_write_access(handle, bh);
1803                 if (err) {
1804                         CERROR("journal_get_write_access() returned error %d\n",
1805                                err);
1806                         break;
1807                 }
1808                 LASSERTF(boffs + size <= bh->b_size,
1809                          "boffs %d size %d bh->b_size %lu\n",
1810                          boffs, size, (unsigned long)bh->b_size);
1811                 memcpy(bh->b_data + boffs, buf, size);
1812                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
1813                 if (err)
1814                         break;
1815
1816                 if (offset + size > new_size)
1817                         new_size = offset + size;
1818                 offset += size;
1819                 bufsize -= size;
1820                 buf += size;
1821         }
1822         if (bh)
1823                 brelse(bh);
1824
1825         if (write_NUL)
1826                 --new_size;
1827         /* correct in-core and on-disk sizes */
1828         if (new_size > i_size_read(inode)) {
1829                 spin_lock(&inode->i_lock);
1830                 if (new_size > i_size_read(inode))
1831                         i_size_write(inode, new_size);
1832                 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
1833                         LDISKFS_I(inode)->i_disksize = i_size_read(inode);
1834                         dirty_inode = 1;
1835                 }
1836                 spin_unlock(&inode->i_lock);
1837                 if (dirty_inode)
1838                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1839         }
1840
1841         if (err == 0)
1842                 *offs = offset;
1843         return err;
1844 }
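
/*
 * Editorial example (not part of the original source) of the write_NUL
 * bookkeeping above: for a long symlink target "abc" the caller passes
 * bufsize = 3 and write_NUL = 1, so four bytes ("abc\0") are written to
 * the block, but new_size is decremented back to 3 before the i_size
 * update.  The on-disk data carries the terminator while the file size,
 * like readlink(2), does not count it.
 */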
1845
1846 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1847                          const struct lu_buf *buf, loff_t *pos,
1848                          struct thandle *handle, int ignore_quota)
1849 {
1850         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1851         struct osd_thandle      *oh;
1852         ssize_t                 result;
1853         int                     is_link;
1854
1855         LASSERT(dt_object_exists(dt));
1856
1857         LASSERT(handle != NULL);
1858         LASSERT(inode != NULL);
1859         ll_vfs_dq_init(inode);
1860
1861         /* XXX: don't check: one declared chunk can be used many times */
1862         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
1863
1864         oh = container_of(handle, struct osd_thandle, ot_super);
1865         LASSERT(oh->ot_handle->h_transaction != NULL);
1866         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
1867
1868         /* Write small symlink to inode body as we need to maintain correct
1869          * on-disk symlinks for ldiskfs.
1870          * Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
1871          * does not count it in.
1872          */
1873         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1874         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1875                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1876         else
1877                 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
1878                                                   buf->lb_len, is_link, pos,
1879                                                   oh->ot_handle);
1880         if (result == 0)
1881                 result = buf->lb_len;
1882
1883         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
1884
1885         return result;
1886 }
1887
1888 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1889                              __u64 start, __u64 end, struct thandle *th)
1890 {
1891         struct osd_thandle *oh;
1892         struct inode       *inode;
1893         int                 rc;
1894         ENTRY;
1895
1896         LASSERT(th);
1897         oh = container_of(th, struct osd_thandle, ot_super);
1898
1899         /*
1900          * we can't reserve credits for the whole truncate: it may
1901          * need to free too many blocks to fit in a single
1902          * transaction. instead we reserve credits to change i_size
1903          * and to put the inode onto the orphan list. if needed,
1904          * the truncate itself will extend or restart the
1905          * transaction.
1906          */
1907         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1908                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1909
1910         inode = osd_dt_obj(dt)->oo_inode;
1911         LASSERT(inode);
1912
1913         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1914                                    i_projid_read(inode), 0, oh, osd_dt_obj(dt),
1915                                    NULL, OSD_QID_BLK);
1916         RETURN(rc);
1917 }
1918
1919 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1920                      __u64 start, __u64 end, struct thandle *th)
1921 {
1922         struct osd_thandle *oh;
1923         struct osd_object  *obj = osd_dt_obj(dt);
1924         struct inode       *inode = obj->oo_inode;
1925         handle_t           *h;
1926         tid_t               tid;
1927         int                rc = 0, rc2 = 0;
1928         ENTRY;
1929
1930         LASSERT(end == OBD_OBJECT_EOF);
1931         LASSERT(dt_object_exists(dt));
1932         LASSERT(osd_invariant(obj));
1933         LASSERT(inode != NULL);
1934         ll_vfs_dq_init(inode);
1935
1936         LASSERT(th);
1937         oh = container_of(th, struct osd_thandle, ot_super);
1938         LASSERT(oh->ot_handle->h_transaction != NULL);
1939
1940         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
1941
1942         tid = oh->ot_handle->h_transaction->t_tid;
1943
1944         spin_lock(&inode->i_lock);
1945         i_size_write(inode, start);
1946         spin_unlock(&inode->i_lock);
1947         ll_truncate_pagecache(inode, start);
1948 #ifdef HAVE_INODEOPS_TRUNCATE
1949         if (inode->i_op->truncate) {
1950                 inode->i_op->truncate(inode);
1951         } else
1952 #endif
1953                 ldiskfs_truncate(inode);
1954
1955         /*
1956          * For a partial-page truncate, flush the page to disk immediately to
1957          * avoid data corruption during direct disk write.  b=17397
1958          */
1959         if ((start & ~PAGE_MASK) != 0)
1960                 rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
1961
1962         h = journal_current_handle();
1963         LASSERT(h != NULL);
1964         LASSERT(h == oh->ot_handle);
1965
1966         /* do not check credits with osd_trans_exec_check() here: the
1967          * truncate can restart the transaction internally, and we may
1968          * restart it again below in that case */
1969
1970         if (tid != h->h_transaction->t_tid) {
1971                 int credits = oh->ot_credits;
1972                 /*
1973                  * the transaction has changed during the truncate;
1974                  * we need to restart the handle with our credits
1975                  */
1976                 if (h->h_buffer_credits < credits) {
1977                         if (ldiskfs_journal_extend(h, credits))
1978                                 rc2 = ldiskfs_journal_restart(h, credits);
1979                 }
1980         }
1981
1982         RETURN(rc == 0 ? rc2 : rc);
1983 }
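
/*
 * Editorial note (not part of the original source) on the restart
 * logic above: ldiskfs_truncate() may commit and restart the running
 * handle internally when it runs out of credits, which changes
 * h_transaction->t_tid.  Comparing the tid saved before the truncate
 * with the current one detects this; if the restarted handle carries
 * fewer buffer credits than originally declared (oh->ot_credits),
 * ldiskfs_journal_extend() tries to top it up in place and, failing
 * that, ldiskfs_journal_restart() opens a fresh transaction with the
 * full reservation.
 */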
1984
1985 static int fiemap_check_ranges(struct inode *inode,
1986                                u64 start, u64 len, u64 *new_len)
1987 {
1988         loff_t maxbytes;
1989
1990         *new_len = len;
1991
1992         if (len == 0)
1993                 return -EINVAL;
1994
1995         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
1996                 maxbytes = inode->i_sb->s_maxbytes;
1997         else
1998                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
1999
2000         if (start > maxbytes)
2001                 return -EFBIG;
2002
2003         /*
2004          * Shrink request scope to what the fs can actually handle.
2005          */
2006         if (len > maxbytes || (maxbytes - len) < start)
2007                 *new_len = maxbytes - start;
2008
2009         return 0;
2010 }
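
/*
 * Editorial example (not part of the original source) of the clamping
 * above: with maxbytes = 16TB, a request of start = 16TB - 1MB and
 * len = 4MB satisfies "(maxbytes - len) < start", so *new_len becomes
 * maxbytes - start = 1MB and the fiemap call walks only the range the
 * filesystem can actually address.
 */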
2011
2012 /* So that the fiemap access checks can't overflow on 32 bit machines. */
2013 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
2014
2015 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
2016                           struct fiemap *fm)
2017 {
2018         struct fiemap_extent_info fieinfo = {0, };
2019         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2020         u64 len;
2021         int rc;
2022
2024         LASSERT(inode);
2025         if (inode->i_op->fiemap == NULL)
2026                 return -EOPNOTSUPP;
2027
2028         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
2029                 return -EINVAL;
2030
2031         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
2032         if (rc)
2033                 return rc;
2034
2035         fieinfo.fi_flags = fm->fm_flags;
2036         fieinfo.fi_extents_max = fm->fm_extent_count;
2037         fieinfo.fi_extents_start = fm->fm_extents;
2038
2039         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
2040                 filemap_write_and_wait(inode->i_mapping);
2041
2042         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
2043         fm->fm_flags = fieinfo.fi_flags;
2044         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
2045
2046         return rc;
2047 }
2048
2049 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
2050                        __u64 start, __u64 end, enum lu_ladvise_type advice)
2051 {
2052         int              rc = 0;
2053         struct inode    *inode = osd_dt_obj(dt)->oo_inode;
2054         ENTRY;
2055
2056         switch (advice) {
2057         case LU_LADVISE_DONTNEED:
2058                 if (end == 0)
2059                         break;
2060                 invalidate_mapping_pages(inode->i_mapping,
2061                                          start >> PAGE_SHIFT,
2062                                          (end - 1) >> PAGE_SHIFT);
2063                 break;
2064         default:
2065                 rc = -ENOTSUPP;
2066                 break;
2067         }
2068
2069         RETURN(rc);
2070 }
2071
2072 /*
2073  * in some cases we may need declare methods for objects being created,
2074  * e.g. when we create a symlink
2075  */
2076 const struct dt_body_operations osd_body_ops_new = {
2077         .dbo_declare_write = osd_declare_write,
2078 };
2079
2080 const struct dt_body_operations osd_body_ops = {
2081         .dbo_read                       = osd_read,
2082         .dbo_declare_write              = osd_declare_write,
2083         .dbo_write                      = osd_write,
2084         .dbo_bufs_get                   = osd_bufs_get,
2085         .dbo_bufs_put                   = osd_bufs_put,
2086         .dbo_write_prep                 = osd_write_prep,
2087         .dbo_declare_write_commit       = osd_declare_write_commit,
2088         .dbo_write_commit               = osd_write_commit,
2089         .dbo_read_prep                  = osd_read_prep,
2090         .dbo_declare_punch              = osd_declare_punch,
2091         .dbo_punch                      = osd_punch,
2092         .dbo_fiemap_get                 = osd_fiemap_get,
2093         .dbo_ladvise                    = osd_ladvise,
2094 };