/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-ldiskfs/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 * OBD_FAIL_CHECK
 */
#include <obd_support.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>

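/*
 * Prepare an osd_iobuf for @pages pages of I/O: reset the per-request
 * state, then grow the dr_pages and dr_blocks arrays if the
 * preallocated buffers are too small, rounding the new capacity up to
 * a power of two (capped at PTLRPC_MAX_BRW_PAGES).
 */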
static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = ktime_set(0, 0);
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)

static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;
}

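/*
 * Record the brw stats for a completed iobuf: the number of fragments
 * the request was split into and the elapsed I/O time, tallied under
 * the read or write histogram depending on iobuf->dr_rw.
 */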
void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS + rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
                                      ktime_to_ms(iobuf->dr_elapsed));
        }
}

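/*
 * bio completion callback; may run in IRQ context. Marks read pages
 * uptodate on success, records the first real error in dr_error, and
 * wakes the waiter once the last in-flight request completes. The
 * #ifdef maze below covers the bi_status/bi_error and one-arg/two-arg
 * bio_endio() API differences between kernel versions.
 */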
#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
# ifdef HAVE_BI_STATUS
        int error = bio->bi_status;
# else
        int error = bio->bi_error;
# endif
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
        struct osd_iobuf *iobuf = bio->bi_private;
        int iter;
        struct bio_vec *bvl;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to <https://jira.hpdd.intel.com/>, along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps).  Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, "
#ifdef HAVE_BI_RW
                       "bi_rw: %lu,"
#else
                       "bi_opf: %u,"
#endif
                       "bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p,"
                       "bi_cnt: %d, bi_private: %p\n", bio->bi_next,
                        (unsigned long)bio->bi_flags,
#ifdef HAVE_BI_RW
                        bio->bi_rw,
#else
                        bio->bi_opf,
#endif
                        bio->bi_vcnt, bio_idx(bio),
                        bio_sectors(bio) << 9, bio->bi_end_io,
#ifdef HAVE_BI_CNT
                        atomic_read(&bio->bi_cnt),
#else
                        atomic_read(&bio->__bi_cnt),
#endif
                        bio->bi_private);
                return;
        }

        /* the check is outside of the loop for performance reasons -bzzz */
        if (!bio_data_dir(bio)) {
                bio_for_each_segment_all(bvl, bio, iter) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl_to_page(bvl));
                        LASSERT(PageLocked(bvl_to_page(bvl)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * set dr_elapsed before dr_numreqs drops to 0; otherwise the
         * service thread could see dr_numreqs already zero while
         * dr_elapsed is not set yet, losing the timing data for this
         * I/O and triggering an assertion in a subsequent OSD call.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                ktime_t now = ktime_get();

                iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
}

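/*
 * Account one more in-flight request: bump the fragment and request
 * counters and tally the in-flight depth and disk I/O size into the
 * read or write brw_stats histograms.
 */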
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device    *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

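/*
 * Submit a bio for read (rw == 0) or write (rw == 1), papering over
 * the old two-argument submit_bio(rw, bio) API and the newer
 * single-argument API where the direction lives in bio->bi_opf.
 */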
static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
#else
        bio->bi_opf |= rw;
        submit_bio(bio);
#endif
}

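/*
 * A new fragment can be merged into @bio only if it starts at the
 * sector immediately following the bio's current last sector.
 */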
static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}

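/*
 * Build and submit bios for the pages in @iobuf, whose on-disk block
 * numbers were filled in by osd_ldiskfs_map_inode_pages(). Contiguous
 * blocks are coalesced into as few bios as possible; a zero block
 * number denotes a hole, which is legal only for reads and is
 * satisfied by zero-filling the page in memory. Reads are waited for
 * here; writes complete asynchronously (see osd_trans_stop()).
 */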
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int            blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page  **pages = iobuf->dr_pages;
        int            npages = iobuf->dr_npages;
        sector_t      *blocks = iobuf->dr_blocks;
        int            total_blocks = npages * blocks_per_page;
        int            sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int   blocksize = inode->i_sb->s_blocksize;
        struct bio    *bio = NULL;
        struct page   *page;
        unsigned int   page_offset;
        sector_t       sector;
        int            nblocks;
        int            block_idx;
        int            page_idx;
        int            i;
        int            rc = 0;
        DECLARE_PLUG(plug);
        ENTRY;

        LASSERT(iobuf->dr_npages == npages);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = ktime_get();

        blk_start_plug(&plug);
        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q =
                                        bdev_get_queue(bio->bi_bdev);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       0, queue_max_hw_segments(q));
                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio_set_sector(bio, sector);
#ifdef HAVE_BI_RW
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
#else
                        bio->bi_opf = (iobuf->dr_rw == 0) ? READ : WRITE;
#endif
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

out:
        blk_finish_plug(&plug);

        /* to achieve better IO throughput, we don't wait for write
         * completion here: we proceed with the transaction commit in
         * parallel and wait for IO completion once the transaction is
         * stopped, see osd_trans_stop() for more details -bzzz */
        if (iobuf->dr_rw == 0) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}

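/*
 * Split a remote [offset, offset + len) byte range into per-page
 * niobuf_local entries, one per page the range touches, and return
 * the page count in *nrpages.
 */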
static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb)
{
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_SIZE - 1);
                int plen = PAGE_SIZE - poff;

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(0);
}

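/*
 * Look up or create the page cache page backing @offset in the
 * object's inode; the page is returned locked. Allocation failures
 * are counted in the LPROC_OSD_NO_PAGE stat.
 */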
static struct page *osd_get_page(struct dt_object *dt, loff_t offset,
                                 gfp_t gfp_mask)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page *page;

        LASSERT(inode);

        page = find_or_create_page(inode->i_mapping, offset >> PAGE_SHIFT,
                                   gfp_mask);

        if (unlikely(page == NULL))
                lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);

        return page;
}

/*
 * The following "locks" are involved:
 * journal_start
 * i_mutex
 * page lock
 *
 * osd write path:
 *  - lock page(s)
 *  - journal_start
 *  - truncate_sem
 *
 * ext4 vmtruncate:
 *  - lock pages, unlock
 *  - journal_start
 *  - lock partial page
 *  - i_data_sem
 *
 */

/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param lnb           array of pages undergoing IO
 * \param npages        number of pages in \a lnb
 *
 * \retval 0            always
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct pagevec pvec;
        int i;

#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
        pagevec_init(&pvec);
#else
        pagevec_init(&pvec, 0);
#endif

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_page == NULL)
                        continue;
                LASSERT(PageLocked(lnb[i].lnb_page));
                unlock_page(lnb[i].lnb_page);
                if (pagevec_add(&pvec, lnb[i].lnb_page) == 0)
                        pagevec_release(&pvec);
                dt_object_put(env, dt);
                lnb[i].lnb_page = NULL;
        }

        /* Release any partial pagevec */
        pagevec_release(&pvec);

        RETURN(0);
}

/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for the same
 * region, but partial-page truncate can leave dirty pages in the cache for
 * ldiskfs. It's possible the writeout on such a page is in progress when we
 * access it. It's also possible that during this writeout we put new
 * (partial) data into the page, but won't be able to proceed in
 * filter_commitrw_write(). Therefore, just wait for writeout completion,
 * as it should be rare enough.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param pos           byte offset of IO start
 * \param len           number of bytes of IO
 * \param lnb           array of extents undergoing IO
 * \param rw            read or write operation, and other flags
 *
 * \retval pages        (zero or more) loaded successfully
 * \retval -ENOMEM      on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        enum dt_bufs_type rw)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int npages, i, rc = 0;
        gfp_t gfp_mask;

        LASSERT(obj->oo_inode);

        osd_map_remote_to_local(pos, len, &npages, lnb);

        /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
        gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
                                             GFP_HIGHUSER;
        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(dt, lnb->lnb_file_offset,
                                             gfp_mask);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                wait_on_page_writeback(lnb->lnb_page);
                BUG_ON(PageWriteback(lnb->lnb_page));

                lu_object_get(&dt->do_lu);
        }

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}

#ifndef HAVE_LDISKFS_MAP_BLOCKS

#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif

struct bpointers {
        sector_t *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

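/*
 * Pick an allocation goal block: prefer a block adjacent to an
 * existing extent in the path, fall back to the index block itself,
 * and finally to the inode's block group with a per-process "colour"
 * offset to spread concurrent allocators apart.
 */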
static long ldiskfs_ext_find_goal(struct inode *inode,
                                  struct ldiskfs_ext_path *path,
                                  unsigned long block, int *aflags)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ldiskfs_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ldiskfs_ext_pblock(ex) +
                                (block - le32_to_cpu(ex->ee_block));

                /* it looks like the index is empty;
                 * try to find a goal starting from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}

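/*
 * Allocate up to *count new blocks for logical block @block via the
 * mballoc allocator, seeding the allocation request with the
 * neighbouring allocated blocks and the goal computed above. On
 * return, *count holds the length actually allocated.
 */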
static unsigned long new_blocks(handle_t *handle, struct inode *inode,
                                struct ldiskfs_ext_path *path,
                                unsigned long block, unsigned long *count,
                                int *err)
{
        struct ldiskfs_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = LDISKFS_MB_HINT_DATA;
        pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}

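/*
 * Callback for ldiskfs_ext_walk_space(): for an already-cached extent
 * just map the blocks; for a hole either report zero blocks (lookup)
 * or, when bp->create is set, open a journal handle and allocate and
 * insert a new extent, restarting the walk (EXT_REPEAT) if the extent
 * tree changed underneath us in the meantime.
 */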
static int ldiskfs_ext_new_extent_cb(struct inode *inode,
                                     struct ldiskfs_ext_path *path,
                                     struct ldiskfs_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                     struct ldiskfs_extent *ex,
#endif
                                     void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct ldiskfs_extent nex;
        unsigned long pblock = 0;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
        if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
#endif
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = LDISKFS_I(inode)->i_ext_generation;
        count = ldiskfs_ext_calc_credits_for_insert(inode, path);

        handle = osd_journal_start(inode, LDISKFS_HT_MISC,
                                   count + LDISKFS_ALLOC_NEEDED + 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        if (tgen != LDISKFS_I(inode)->i_ext_generation) {
                /* the tree has changed, so the path can be invalid now */
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback is not
         * protected by i_data_sem as a whole, so we patch it to store the
         * generation in the path and verify here that the tree hasn't
         * changed */
        down_write((&LDISKFS_I(inode)->i_data_sem));

        /* validate the extent: make sure the extent tree has not changed */
        if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&LDISKFS_I(inode)->i_data_sem);
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        BUG_ON(count > cex->ec_len);

        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ldiskfs_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
        if (err) {
                /* free the data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it on every free() */
                ldiskfs_discard_preallocations(inode);
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ldiskfs_free_blocks(handle, inode, NULL,
                                    ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#else
                ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * By storing the length of the extent we actually inserted,
         * we ask ldiskfs_ext_walk_space() to continue
         * scanning after that block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ldiskfs_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&LDISKFS_I(inode)->i_data_sem));
        ldiskfs_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        CERROR("current extent: %u/%u/%llu %d\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start,
                                cex->ec_type);
#else
                        CERROR("current extent: %u/%u/%llu\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start);
#endif
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (pblock != 0) {
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                unmap_underlying_metadata(inode->i_sb->s_bdev,
                                                          *(bp->blocks));
#else
                                clean_bdev_aliases(inode->i_sb->s_bdev,
                                                   *(bp->blocks), 1);
#endif
                        }
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

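/*
 * Map (and, if @create is set, allocate) the blocks backing @clen
 * pages starting at page @index of an extent-based inode, filling
 * @blocks with the physical block numbers via the callback above.
 */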
static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index,
                                   int clen, sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct bpointers bp;
        int err;

        if (index + clen >= inode->i_sb->s_maxbytes >> PAGE_SHIFT)
                return -EFBIG;

        bp.blocks = blocks;
        bp.start = index * blocks_per_page;
        bp.init_num = bp.num = clen * blocks_per_page;
        bp.create = create;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               bp.start, bp.start + bp.num - 1, (unsigned)inode->i_ino);

        err = ldiskfs_ext_walk_space(inode, bp.start, bp.num,
                                     ldiskfs_ext_new_extent_cb, &bp);
        ldiskfs_ext_invalidate_cache(inode);

        return err;
}

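/*
 * Block-mapped (non-extent) inode variant: map each page with
 * ldiskfs_map_inode_page(), refusing pages beyond the bitmap-format
 * file size limit (s_bitmap_maxbytes).
 */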
static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode,
                                          struct page **page, int pages,
                                          sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        pgoff_t bitmap_max_page_index;
        sector_t *b;
        int rc = 0, i;

        bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
                                PAGE_SHIFT;
        for (i = 0, b = blocks; i < pages; i++, page++) {
                if ((*page)->index + 1 >= bitmap_max_page_index) {
                        rc = -EFBIG;
                        break;
                }
                rc = ldiskfs_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %llu create %d: rc %d\n",
                               inode->i_ino,
                               (unsigned long long)*b, create, rc);
                        break;
                }
                b += blocks_per_page;
        }
        return rc;
}

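/*
 * Extent-based inode variant: coalesce runs of consecutive page
 * indices (the pages arrive sorted) and map each run with a single
 * osd_ldiskfs_map_nblocks() call.
 */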
static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode,
                                           struct page **page,
                                           int pages, sector_t *blocks,
                                           int create)
{
        int rc = 0, i = 0, clen = 0;
        struct page *fp = NULL;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are already sorted, so we just have to find runs of
         * contiguous pages and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += clen * (PAGE_SIZE >> inode->i_blkbits);
        }

        if (fp)
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);

cleanup:
        return rc;
}

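/*
 * Dispatch to the extent-based or block-mapped mapping routine
 * depending on whether the inode uses an extent tree.
 */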
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int rc;

        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);

        return rc;
}
#else
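/*
 * Modern kernels export ldiskfs_map_blocks(), so a single routine
 * handles both extent and block-mapped inodes: coalesce runs of
 * consecutive pages, then call ldiskfs_map_blocks() repeatedly until
 * the whole run is mapped; a zero return denotes a hole, recorded as
 * a zero block number.
 */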
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;
        handle_t *handle = NULL;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        if (create) {
                create = LDISKFS_GET_BLOCKS_CREATE;
                handle = ldiskfs_journal_current_handle();
                LASSERT(handle != NULL);
                rc = osd_attach_jinode(inode);
                if (rc)
                        return rc;
        }
        /* pages are already sorted, so we just have to find runs of
         * contiguous pages and process them properly */
        while (i < pages) {
                long blen, total = 0;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
cont_map:
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;
                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                } else {
                                        *(blocks + total) = map.m_pblk + c;
                                        /* unmap any possible underlying
                                         * metadata from the block device
                                         * mapping.  bug 6998. */
                                        if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                            create)
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                                unmap_underlying_metadata(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c);
#else
                                                clean_bdev_aliases(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c, 1);
#endif
                                }
                        }
                        rc = 0;
                }
                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        return rc;
}
#endif /* HAVE_LDISKFS_MAP_BLOCKS */

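/*
 * Prepare pages for a network write: drop them from the cache if
 * caching is disabled, mark them !uptodate until the bulk transfer
 * lands, and read in (via the iobuf) any partial pages that overlap
 * existing file data so the unwritten parts are preserved; partial
 * pages beyond EOF are simply zero-filled around the region to be
 * written.
 */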
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        ktime_t start;
        ktime_t end;
        s64 timediff;
        ssize_t                 isize;
        __s64                   maxidx;
        int                     rc = 0;
        int                     i;
        int                     cache = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        start = ktime_get();
        for (i = 0; i < npages; i++) {

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);

                /*
                 * till commit the content of the page is undefined;
                 * we'll set it uptodate once the bulk is done, otherwise
                 * subsequent reads could access non-stable data
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_SIZE)
                        continue;

                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}

struct osd_fextent {
        sector_t        start;
        sector_t        end;
        unsigned int    mapped:1;
};

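/*
 * Check whether the block containing @offset is already allocated,
 * caching the last FIEMAP answer in @cached_extent so that a run of
 * queries over the same extent issues only one fiemap call.
 */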
static int osd_is_mapped(struct dt_object *dt, __u64 offset,
                         struct osd_fextent *cached_extent)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        sector_t block = offset >> inode->i_blkbits;
        sector_t start;
        struct fiemap_extent_info fei = { 0 };
        struct fiemap_extent fe = { 0 };
        mm_segment_t saved_fs;
        int rc;

        if (block >= cached_extent->start && block < cached_extent->end)
                return cached_extent->mapped;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
                return 0;

        fei.fi_extents_max = 1;
        fei.fi_extents_start = &fe;

        saved_fs = get_fs();
        set_fs(get_ds());
        rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
        set_fs(saved_fs);
        if (rc != 0)
                return 0;

        start = fe.fe_logical >> inode->i_blkbits;

        if (start > block) {
                cached_extent->start = block;
                cached_extent->end = start;
                cached_extent->mapped = 0;
        } else {
                cached_extent->start = start;
                cached_extent->end = (fe.fe_logical + fe.fe_length) >>
                                      inode->i_blkbits;
                cached_extent->mapped = 1;
        }

        return cached_extent->mapped;
}

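/*
 * Declare (reserve journal credits and quota for) an upcoming bulk
 * write: estimate the number of distinct extents in the request, the
 * worst-case tree depth, and the bitmap/group-descriptor blocks that
 * may be dirtied, then declare the matching quota for any blocks not
 * already mapped.
 */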
static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        int                     extents = 1;
        int                     depth;
        int                     i;
        int                     newblocks;
        int                     rc = 0;
        int                     flags = 0;
        int                     credits = 0;
        long long               quota_space = 0;
        struct osd_fextent      extent = { 0 };
        enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        newblocks = npages;

        /* calculate number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                if (i && lnb[i].lnb_file_offset !=
                    lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
                        extents++;

                if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
                        lnb[i].lnb_flags |= OBD_BRW_MAPPED;
                else
                        quota_space += PAGE_SIZE;

                /* ignore quota for the whole request if any page is from
                 * the client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the check for
                 * whether a page is from cache can be simplified to:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on a per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        declare_flags |= OSD_QID_FORCE;
        }

        /*
         * each extent can go into a new leaf causing a split
         * 5 is max tree depth: inode + 4 index blocks
         * with blockmaps, depth is 3 at most
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * many concurrent threads may grow the tree by the time
                 * our transaction starts, so consider 2 the minimum depth
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * extents;
        }

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);

        /* each new block can go in a different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                credits += newblocks;

        /* we can't dirty more gd blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                credits += newblocks;

        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* make sure the over quota flags were not set */
        lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), quota_space, oh,
                                   osd_dt_obj(dt), &flags, declare_flags);

        /* we only need to store the overquota flags in the first lnb for
         * now; once we support multi-object BRW, this code will need to be
         * revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
        if (flags & QUOTA_FL_OVER_PRJQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;

        RETURN(rc);
}

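/*
 * Commit a prepared bulk write: map the pages to disk blocks
 * (allocating where needed) within the caller's transaction, extend
 * i_size/i_disksize if the write grows the file, and submit the
 * write bios. On failure the pages are dropped from the cache so
 * stale data is never served.
 */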
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *thandle)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
        loff_t isize;
        int rc = 0, i;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 1, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        ll_vfs_dq_init(inode);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_rc == -ENOSPC &&
                    (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
                        /* Allow the write to proceed if overwriting an
                         * existing block */
                        lnb[i].lnb_rc = 0;
                }

                if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
                               lnb[i].lnb_rc);
                        LASSERT(lnb[i].lnb_page);
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                        continue;
                }

                LASSERT(PageLocked(lnb[i].lnb_page));
                LASSERT(!PageWriteback(lnb[i].lnb_page));

                if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
                        isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;

                /*
                 * Since write and truncate are serialized by oo_sem, even
                 * partial-page truncate should not leave dirty pages in the
                 * page cache.
                 */
                LASSERT(!PageDirty(lnb[i].lnb_page));

                SetPageUptodate(lnb[i].lnb_page);

                osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
        }

        osd_trans_exec_op(env, thandle, OSD_OT_WRITE);

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 1);
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
        }

        if (likely(rc == 0)) {
                spin_lock(&inode->i_lock);
                if (isize > i_size_read(inode)) {
                        i_size_write(inode, isize);
                        LDISKFS_I(inode)->i_disksize = isize;
                        spin_unlock(&inode->i_lock);
                        ll_dirty_inode(inode, I_DIRTY_DATASYNC);
                } else {
                        spin_unlock(&inode->i_lock);
                }

                rc = osd_do_bio(osd, inode, iobuf);
                /* we don't do stats here as in the read path because the
                 * write is async: we'll do this in osd_bufs_put() */
        } else {
                osd_fini_iobuf(osd, iobuf);
        }

        osd_trans_exec_check(env, thandle, OSD_OT_WRITE);

        if (unlikely(rc != 0)) {
                /* if the write fails, we should drop the pages from the
                 * cache */
                for (i = 0; i < npages; i++) {
                        if (lnb[i].lnb_page == NULL)
                                continue;
                        LASSERT(PageLocked(lnb[i].lnb_page));
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                }
        }

        RETURN(rc);
}

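/*
 * Prepare pages for a bulk read: clamp each lnb_rc to the bytes
 * actually available before EOF, count cache hits/misses, queue any
 * non-uptodate pages into the iobuf, and read them in synchronously
 * via osd_do_bio().
 */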
1374 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1375                          struct niobuf_local *lnb, int npages)
1376 {
1377         struct osd_thread_info *oti = osd_oti_get(env);
1378         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1379         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1380         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1381         int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
1382         ktime_t start, end;
1383         s64 timediff;
1384         loff_t isize;
1385
1386         LASSERT(inode);
1387
1388         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1389         if (unlikely(rc != 0))
1390                 RETURN(rc);
1391
1392         isize = i_size_read(inode);
1393
1394         if (osd->od_read_cache)
1395                 cache = 1;
1396         if (isize > osd->od_readcache_max_filesize)
1397                 cache = 0;
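        /*
         * e.g. (illustrative) with od_read_cache set but
         * od_readcache_max_filesize at 32MB, a 1GB object is read with
         * cache == 0, so each page is dropped from the page cache in
         * the loop below via generic_error_remove_page().
         */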
1398
1399         start = ktime_get();
1400         for (i = 0; i < npages; i++) {
1401
1402                 if (isize <= lnb[i].lnb_file_offset)
1403                         /* If there's no more data, abort early.
1404                          * lnb->lnb_rc == 0, so it's easy to detect later. */
1405                         break;
1406
1407                 if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len)
1408                         lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
1409                 else
1410                         lnb[i].lnb_rc = lnb[i].lnb_len;
1411
1412                 /* Bypass disk read if fail_loc is set properly */
1413                 if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
1414                         SetPageUptodate(lnb[i].lnb_page);
1415
1416                 if (PageUptodate(lnb[i].lnb_page)) {
1417                         cache_hits++;
1418                 } else {
1419                         cache_misses++;
1420                         osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
1421                 }
1422
1423                 if (cache == 0)
1424                         generic_error_remove_page(inode->i_mapping,
1425                                                   lnb[i].lnb_page);
1426         }
1427         end = ktime_get();
1428         timediff = ktime_us_delta(end, start);
1429         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1430
1431         if (cache_hits != 0)
1432                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1433                                     cache_hits);
1434         if (cache_misses != 0)
1435                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1436                                     cache_misses);
1437         if (cache_hits + cache_misses != 0)
1438                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1439                                     cache_hits + cache_misses);
1440
1441         if (iobuf->dr_npages) {
1442                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1443                                                  iobuf->dr_npages,
1444                                                  iobuf->dr_blocks, 0);
1445                 rc = osd_do_bio(osd, inode, iobuf);
1446
1447                 /* IO stats will be done in osd_bufs_put() */
1448         }
1449
1450         RETURN(rc);
1451 }
1452
1453 /*
1454  * XXX: Another layering violation for now.
1455  *
1456  * We don't want to use the ->f_op->read/write methods, because the
1457  * generic file write path
1458  *
1459  *         - serializes on ->i_sem, and
1460  *         - does a lot of extra work like balance_dirty_pages(),
1461  *
1462  * which doesn't work for globally shared files like /last_rcvd.
1463  */
1464 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1465 {
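        /*
         * Fast symlinks keep the target in the inode body (ei->i_data).
         * Callers (see osd_read() below) take this path only when
         * buflen < sizeof(ei->i_data), so the memcpy() stays in bounds.
         */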
1466         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1467
1468         memcpy(buffer, (char *)ei->i_data, buflen);
1469
1470         return buflen;
1471 }
1472
1473 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1474 {
1475         struct buffer_head *bh;
1476         unsigned long block;
1477         int osize;
1478         int blocksize;
1479         int csize;
1480         int boffs;
1481
1482         /* prevent reading past EOF */
1483         spin_lock(&inode->i_lock);
1484         if (i_size_read(inode) < *offs + size) {
1485                 loff_t diff = i_size_read(inode) - *offs;
1486                 spin_unlock(&inode->i_lock);
1487                 if (diff < 0) {
1488                         CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
1489                                i_size_read(inode), *offs);
1490                         return -EBADR;
1491                 } else if (diff == 0) {
1492                         return 0;
1493                 } else {
1494                         size = diff;
1495                 }
1496         } else {
1497                 spin_unlock(&inode->i_lock);
1498         }
1499
1500         blocksize = 1 << inode->i_blkbits;
1501         osize = size;
1502         while (size > 0) {
1503                 block = *offs >> inode->i_blkbits;
1504                 boffs = *offs & (blocksize - 1);
1505                 csize = min(blocksize - boffs, size);
1506                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1507                 if (IS_ERR(bh)) {
1508                         CERROR("%s: can't read %u@%llu on ino %lu: "
1509                                "rc = %ld\n", osd_ino2name(inode),
1510                                csize, *offs, inode->i_ino,
1511                                PTR_ERR(bh));
1512                         return PTR_ERR(bh);
1513                 }
1514
1515                 if (bh != NULL) {
1516                         memcpy(buf, bh->b_data + boffs, csize);
1517                         brelse(bh);
1518                 } else {
1519                         memset(buf, 0, csize);
1520                 }
1521
1522                 *offs += csize;
1523                 buf += csize;
1524                 size -= csize;
1525         }
1526         return osize;
1527 }
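
/*
 * Usage sketch (illustrative, not taken from a caller): read a small
 * record at a given offset.
 *
 *      char rec[128];
 *      loff_t off = 0;
 *      int rc = osd_ldiskfs_read(inode, rec, sizeof(rec), &off);
 *
 * On success rc is the number of bytes copied (the requested size,
 * clamped at EOF) and off has advanced by rc; rc == 0 means off was
 * exactly at EOF, and -EBADR means off was beyond it.
 */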
1528
1529 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1530                         struct lu_buf *buf, loff_t *pos)
1531 {
1532         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1533         int           rc;
1534
1535         /* Read small symlink from inode body as we need to maintain correct
1536          * on-disk symlinks for ldiskfs.
1537          */
1538         if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1539             (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1540                 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1541         else
1542                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1543
1544         return rc;
1545 }
1546
1547 static inline int osd_extents_enabled(struct super_block *sb,
1548                                       struct inode *inode)
1549 {
1550         if (inode != NULL) {
1551                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1552                         return 1;
1553         } else if (ldiskfs_has_feature_extents(sb)) {
1554                 return 1;
1555         }
1556         return 0;
1557 }
1558
1559 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1560                            const loff_t size, const loff_t pos,
1561                            const int blocks)
1562 {
1563         int credits, bits, bs, i;
1564
1565         bits = sb->s_blocksize_bits;
1566         bs = 1 << bits;
1567
1568         /* legacy blockmap: 3 levels * 3 (bitmap, gd, the block itself);
1569          * we do not expect blockmaps on large files, so shrink the
1570          * estimate to 2 levels (enough for files up to 4GB) */
1571 
1572         /* this is the default reservation: 2 levels */
1573         credits = (blocks + 2) * 3;
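        /*
         * e.g. blocks = 8: credits = (8 + 2) * 3 = 30, i.e. 3 credits
         * (bitmap, gd, the block itself) for each data block plus the
         * same for the 2 assumed levels of indirect blocks.
         */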
1574
1575         /* actual offset is unknown, hard to optimize */
1576         if (pos == -1)
1577                 return credits;
1578
1579         /* now check a few specific cases we can optimize */
1580         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1581                 /* no indirects */
1582                 credits = blocks;
1583                 /* allocate if not allocated */
1584                 if (inode == NULL) {
1585                         credits += blocks * 2;
1586                         return credits;
1587                 }
1588                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1589                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1590                         if (LDISKFS_I(inode)->i_data[i] == 0)
1591                                 credits += 2;
1592                 }
1593         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1594                 /* single indirect */
1595                 credits = blocks * 3;
1596                 if (inode == NULL ||
1597                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1598                         credits += 3;
1599                 else
1600                         /* The indirect block may be modified. */
1601                         credits += 1;
1602         }
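        /*
         * Illustrative numbers, assuming 4KB blocks: overwriting 2
         * already-allocated direct blocks costs 2 credits, while 2 new
         * blocks behind a not-yet-allocated single indirect cost
         * 2 * 3 + 3 = 9. The 1024 above is the number of block
         * addresses a 4KB indirect block holds (bs / 4).
         */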
1603
1604         return credits;
1605 }
1606
1607 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1608                                  const struct lu_buf *buf, loff_t _pos,
1609                                  struct thandle *handle)
1610 {
1611         struct osd_object  *obj  = osd_dt_obj(dt);
1612         struct inode       *inode = obj->oo_inode;
1613         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1614         struct osd_thandle *oh;
1615         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1616         int                 bits, bs;
1617         int                 depth, size;
1618         loff_t              pos;
1619         ENTRY;
1620
1621         LASSERT(buf != NULL);
1622         LASSERT(handle != NULL);
1623
1624         oh = container_of0(handle, struct osd_thandle, ot_super);
1625         LASSERT(oh->ot_handle == NULL);
1626
1627         size = buf->lb_len;
1628         bits = sb->s_blocksize_bits;
1629         bs = 1 << bits;
1630
1631         if (_pos == -1) {
1632                 /* if this is an append, then we
1633                  * should expect a cross-block record */
1634                 pos = 0;
1635         } else {
1636                 pos = _pos;
1637         }
1638
1639         /* blocks to modify */
1640         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1641         LASSERT(blocks > 0);
1642
1643         if (inode != NULL && _pos != -1) {
1644                 /* object size in blocks */
1645                 est = (i_size_read(inode) + bs - 1) >> bits;
1646                 allocated = inode->i_blocks >> (bits - 9);
1647                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1648                         /* looks like an overwrite, no need to modify tree */
1649                         credits = blocks;
1650                         /* no need to modify i_size */
1651                         goto out;
1652                 }
1653         }
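        /*
         * The fast path above: e.g. rewriting one block inside a fully
         * allocated file reserves a single credit for the data block,
         * since neither the block tree nor i_size changes.
         */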
1654
1655         if (osd_extents_enabled(sb, inode)) {
1656                 /*
1657                  * Many concurrent threads may grow the tree by the time
1658                  * our transaction starts, so consider 2 the minimum depth.
1659                  * For every level we may need to allocate a new block
1660                  * and take some entries from the old one: 3 blocks to
1661                  * allocate (bitmap, gd, the block itself) plus the old
1662                  * block to modify - 4 per level.
1663                  */
1664                 depth = inode != NULL ? ext_depth(inode) : 0;
1665                 depth = max(depth, 1) + 1;
1666                 credits = depth;
1667                 /* if not an append, then a split may need to modify
1668                  * existing blocks, moving entries into the new ones */
1669                 if (_pos != -1)
1670                         credits += depth;
1671                 /* blocks to store data: bitmap,gd,itself */
1672                 credits += blocks * 3;
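                /*
                 * e.g. appending 4 blocks to an inode with a depth-2
                 * extent tree: depth = max(2, 1) + 1 = 3, so credits =
                 * 3 + 4 * 3 = 15 (an append, so no extra depth for
                 * splitting existing index blocks).
                 */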
1673         } else {
1674                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1675         }
1676         /* one credit for the inode itself; if the inode is created as
1677          * part of this transaction, the creation method already counts it */
1678         if (inode != NULL)
1679                 credits++;
1680
1681 out:
1682
1683         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1684
1685         /* dt_declare_write() is usually called for system objects, such
1686          * as llog or last_rcvd files. We needn't enforce quota on those
1687          * objects, so always set the lqi_space as 0. */
1688         if (inode != NULL)
1689                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1690                                            i_gid_read(inode),
1691                                            i_projid_read(inode), 0,
1692                                            oh, obj, NULL, OSD_QID_BLK);
1693         RETURN(rc);
1694 }
1695
1696 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1697 {
1698         /* LU-2634: clear the extent format for fast symlink */
1699         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1700
1701         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1702         spin_lock(&inode->i_lock);
1703         LDISKFS_I(inode)->i_disksize = buflen;
1704         i_size_write(inode, buflen);
1705         spin_unlock(&inode->i_lock);
1706         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1707
1708         return 0;
1709 }
1710
1711 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
1712                              int write_NUL, loff_t *offs, handle_t *handle)
1713 {
1714         struct buffer_head *bh        = NULL;
1715         loff_t              offset    = *offs;
1716         loff_t              new_size  = i_size_read(inode);
1717         unsigned long       block;
1718         int                 blocksize = 1 << inode->i_blkbits;
1719         int                 err = 0;
1720         int                 size;
1721         int                 boffs;
1722         int                 dirty_inode = 0;
1723
1724         if (write_NUL) {
1725                 /*
1726                  * A long symlink write does not count the NUL terminator
1727                  * in bufsize, so we write it explicitly here; the inode's
1728                  * file size does not count the NUL terminator either.
1729                  */
1730                 ((char *)buf)[bufsize] = '\0';
1731                 ++bufsize;
1732         }
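        /*
         * e.g. a long symlink target "target" arrives with bufsize == 6:
         * 7 bytes including the NUL are written to the block, while
         * new_size is decremented below so i_size remains 6.
         */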
1733
1734         while (bufsize > 0) {
1735                 int credits = handle->h_buffer_credits;
1736
1737                 if (bh)
1738                         brelse(bh);
1739
1740                 block = offset >> inode->i_blkbits;
1741                 boffs = offset & (blocksize - 1);
1742                 size = min(blocksize - boffs, bufsize);
1743                 bh = __ldiskfs_bread(handle, inode, block, 1);
1744                 if (IS_ERR_OR_NULL(bh)) {
1745                         if (bh == NULL) {
1746                                 err = -EIO;
1747                         } else {
1748                                 err = PTR_ERR(bh);
1749                                 bh = NULL;
1750                         }
1751
1752                         CERROR("%s: error reading offset %llu (block %lu, "
1753                                "size %d, offs %llu), credits %d/%d: rc = %d\n",
1754                                inode->i_sb->s_id, offset, block, bufsize, *offs,
1755                                credits, handle->h_buffer_credits, err);
1756                         break;
1757                 }
1758
1759                 err = ldiskfs_journal_get_write_access(handle, bh);
1760                 if (err) {
1761                         CERROR("journal_get_write_access() returned error %d\n",
1762                                err);
1763                         break;
1764                 }
1765                 LASSERTF(boffs + size <= bh->b_size,
1766                          "boffs %d size %d bh->b_size %lu\n",
1767                          boffs, size, (unsigned long)bh->b_size);
1768                 memcpy(bh->b_data + boffs, buf, size);
1769                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
1770                 if (err)
1771                         break;
1772
1773                 if (offset + size > new_size)
1774                         new_size = offset + size;
1775                 offset += size;
1776                 bufsize -= size;
1777                 buf += size;
1778         }
1779         if (bh)
1780                 brelse(bh);
1781
1782         if (write_NUL)
1783                 --new_size;
1784         /* correct in-core and on-disk sizes */
1785         if (new_size > i_size_read(inode)) {
1786                 spin_lock(&inode->i_lock);
1787                 if (new_size > i_size_read(inode))
1788                         i_size_write(inode, new_size);
1789                 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
1790                         LDISKFS_I(inode)->i_disksize = i_size_read(inode);
1791                         dirty_inode = 1;
1792                 }
1793                 spin_unlock(&inode->i_lock);
1794                 if (dirty_inode)
1795                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1796         }
1797
1798         if (err == 0)
1799                 *offs = offset;
1800         return err;
1801 }
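
/*
 * Usage sketch (illustrative): append a record under a running journal
 * handle, as osd_write() does below; on success (rc == 0) *offs has
 * advanced past the record.
 *
 *      loff_t off = 0;
 *      int rc = osd_ldiskfs_write_record(inode, rec, reclen,
 *                                        0, &off, oh->ot_handle);
 *
 * The 0 disables NUL-termination (it is set only for long symlinks),
 * and the handle must carry enough credits, e.g. as declared by
 * osd_declare_write().
 */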
1802
1803 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1804                          const struct lu_buf *buf, loff_t *pos,
1805                          struct thandle *handle, int ignore_quota)
1806 {
1807         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1808         struct osd_thandle      *oh;
1809         ssize_t                 result;
1810         int                     is_link;
1811
1812         LASSERT(dt_object_exists(dt));
1813
1814         LASSERT(handle != NULL);
1815         LASSERT(inode != NULL);
1816         ll_vfs_dq_init(inode);
1817
1818         /* XXX: don't check: one declared chunk can be used many times */
1819         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
1820
1821         oh = container_of(handle, struct osd_thandle, ot_super);
1822         LASSERT(oh->ot_handle->h_transaction != NULL);
1823         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
1824
1825         /* Write small symlink to inode body as we need to maintain correct
1826          * on-disk symlinks for ldiskfs.
1827          * Note: buf->lb_buf contains a NUL terminator while buf->lb_len
1828          * does not include it.
1829          */
1830         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1831         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1832                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1833         else
1834                 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
1835                                                   buf->lb_len, is_link, pos,
1836                                                   oh->ot_handle);
1837         if (result == 0)
1838                 result = buf->lb_len;
1839
1840         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
1841
1842         return result;
1843 }
1844
1845 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1846                              __u64 start, __u64 end, struct thandle *th)
1847 {
1848         struct osd_thandle *oh;
1849         struct inode       *inode;
1850         int                 rc;
1851         ENTRY;
1852
1853         LASSERT(th);
1854         oh = container_of(th, struct osd_thandle, ot_super);
1855
1856         /*
1857          * We don't reserve credits for the whole truncate: that is
1858          * not possible, as the truncate may need to free too many
1859          * blocks to fit a single transaction. Instead we reserve
1860          * credits to change i_size and to put the inode onto the
1861          * orphan list; if needed, the truncate itself will extend
1862          * or restart the transaction.
1863          */
1864         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1865                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1866
1867         inode = osd_dt_obj(dt)->oo_inode;
1868         LASSERT(inode);
1869
1870         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1871                                    i_projid_read(inode), 0, oh, osd_dt_obj(dt),
1872                                    NULL, OSD_QID_BLK);
1873         RETURN(rc);
1874 }
1875
1876 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1877                      __u64 start, __u64 end, struct thandle *th)
1878 {
1879         struct osd_thandle *oh;
1880         struct osd_object  *obj = osd_dt_obj(dt);
1881         struct inode       *inode = obj->oo_inode;
1882         handle_t           *h;
1883         tid_t               tid;
1884         int                rc = 0, rc2 = 0;
1885         ENTRY;
1886
1887         LASSERT(end == OBD_OBJECT_EOF);
1888         LASSERT(dt_object_exists(dt));
1889         LASSERT(osd_invariant(obj));
1890         LASSERT(inode != NULL);
1891         ll_vfs_dq_init(inode);
1892
1893         LASSERT(th);
1894         oh = container_of(th, struct osd_thandle, ot_super);
1895         LASSERT(oh->ot_handle->h_transaction != NULL);
1896
1897         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
1898
1899         tid = oh->ot_handle->h_transaction->t_tid;
1900
1901         spin_lock(&inode->i_lock);
1902         i_size_write(inode, start);
1903         spin_unlock(&inode->i_lock);
1904         ll_truncate_pagecache(inode, start);
1905 #ifdef HAVE_INODEOPS_TRUNCATE
1906         if (inode->i_op->truncate) {
1907                 inode->i_op->truncate(inode);
1908         } else
1909 #endif
1910                 ldiskfs_truncate(inode);
1911
1912         /*
1913          * For a partial-page truncate, flush the page to disk immediately to
1914          * avoid data corruption during direct disk write.  b=17397
1915          */
1916         if ((start & ~PAGE_MASK) != 0)
1917                 rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
1918
1919         h = journal_current_handle();
1920         LASSERT(h != NULL);
1921         LASSERT(h == oh->ot_handle);
1922
1923         /* do not check credits with osd_trans_exec_check(): the truncate
1924          * can restart the transaction internally, and in that case we
1925          * restart our handle below */
1926
1927         if (tid != h->h_transaction->t_tid) {
1928                 int credits = oh->ot_credits;
1929                 /*
1930                  * the transaction has changed during the truncate; if the
1931                  * handle cannot be extended, restart it with our credits
1932                  */
1933                 if (h->h_buffer_credits < credits) {
1934                         if (ldiskfs_journal_extend(h, credits))
1935                                 rc2 = ldiskfs_journal_restart(h, credits);
1936                 }
1937         }
1938
1939         RETURN(rc == 0 ? rc2 : rc);
1940 }
1941
1942 static int fiemap_check_ranges(struct inode *inode,
1943                                u64 start, u64 len, u64 *new_len)
1944 {
1945         loff_t maxbytes;
1946
1947         *new_len = len;
1948
1949         if (len == 0)
1950                 return -EINVAL;
1951
1952         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
1953                 maxbytes = inode->i_sb->s_maxbytes;
1954         else
1955                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
1956
1957         if (start > maxbytes)
1958                 return -EFBIG;
1959
1960         /*
1961          * Shrink request scope to what the fs can actually handle.
1962          */
1963         if (len > maxbytes || (maxbytes - len) < start)
1964                 *new_len = maxbytes - start;
1965
1966         return 0;
1967 }
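
/*
 * e.g. (illustrative) a request with start == 0 and len == ~0ULL on an
 * extent-mapped file is clamped to *new_len == s_maxbytes, while any
 * start beyond s_maxbytes fails outright with -EFBIG.
 */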
1968
1969 /* So that the fiemap access checks can't overflow on 32 bit machines. */
1970 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
1971
1972 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
1973                           struct fiemap *fm)
1974 {
1975         struct fiemap_extent_info fieinfo = {0, };
1976         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1977         u64 len;
1978         int rc;
1979 
1981         LASSERT(inode);
1982         if (inode->i_op->fiemap == NULL)
1983                 return -EOPNOTSUPP;
1984
1985         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
1986                 return -EINVAL;
1987
1988         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
1989         if (rc)
1990                 return rc;
1991
1992         fieinfo.fi_flags = fm->fm_flags;
1993         fieinfo.fi_extents_max = fm->fm_extent_count;
1994         fieinfo.fi_extents_start = fm->fm_extents;
1995
1996         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
1997                 filemap_write_and_wait(inode->i_mapping);
1998
1999         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
2000         fm->fm_flags = fieinfo.fi_flags;
2001         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
2002
2003         return rc;
2004 }
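
/*
 * Caller-side sketch (illustrative): the caller supplies a struct fiemap
 * followed by room for fm_extent_count extents, e.g.
 *
 *      fm->fm_start = 0;
 *      fm->fm_length = FIEMAP_MAX_OFFSET;
 *      fm->fm_flags = FIEMAP_FLAG_SYNC;
 *      fm->fm_extent_count = 1;
 *      rc = osd_fiemap_get(env, dt, fm);
 *
 * On return fm_mapped_extents holds the number of extents filled in.
 */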
2005
2006 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
2007                        __u64 start, __u64 end, enum lu_ladvise_type advice)
2008 {
2009         int              rc = 0;
2010         struct inode    *inode = osd_dt_obj(dt)->oo_inode;
2011         ENTRY;
2012
2013         switch (advice) {
2014         case LU_LADVISE_DONTNEED:
2015                 if (end == 0)
2016                         break;
2017                 invalidate_mapping_pages(inode->i_mapping,
2018                                          start >> PAGE_SHIFT,
2019                                          (end - 1) >> PAGE_SHIFT);
2020                 break;
2021         default:
2022                 rc = -ENOTSUPP;
2023                 break;
2024         }
2025
2026         RETURN(rc);
2027 }
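
/*
 * e.g. (illustrative, 4KB pages) LU_LADVISE_DONTNEED with start == 0 and
 * end == 1MB invalidates page indices 0..255; invalidate_mapping_pages()
 * only drops clean, unmapped pages, so dirty data is unaffected.
 */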
2028
2029 /*
2030  * in some cases we may need declare methods for objects being created
2031  * e.g., when we create symlink
2032  */
2033 const struct dt_body_operations osd_body_ops_new = {
2034         .dbo_declare_write = osd_declare_write,
2035 };
2036
2037 const struct dt_body_operations osd_body_ops = {
2038         .dbo_read                       = osd_read,
2039         .dbo_declare_write              = osd_declare_write,
2040         .dbo_write                      = osd_write,
2041         .dbo_bufs_get                   = osd_bufs_get,
2042         .dbo_bufs_put                   = osd_bufs_put,
2043         .dbo_write_prep                 = osd_write_prep,
2044         .dbo_declare_write_commit       = osd_declare_write_commit,
2045         .dbo_write_commit               = osd_write_commit,
2046         .dbo_read_prep                  = osd_read_prep,
2047         .dbo_declare_punch              = osd_declare_punch,
2048         .dbo_punch                      = osd_punch,
2049         .dbo_fiemap_get                 = osd_fiemap_get,
2050         .dbo_ladvise                    = osd_ladvise,
2051 };