/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-ldiskfs/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 * OBD_FAIL_CHECK
 */
#include <obd_support.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>
static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = ktime_set(0, 0);
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;
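
        /*
         * Sizing sketch (illustrative numbers, not from this change,
         * assuming PTLRPC_MAX_BRW_PAGES >= 1024): a first request of
         * pages = 1000 grows i as 256 -> 512 -> 1024, so the buffers
         * below are reallocated for 1024 pages and any later request
         * up to that size avoids another reallocation.
         */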

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
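
/*
 * Callers go through the osd_init_iobuf() wrapper so that dr_init_at
 * records the calling source line, which the LASSERTF above reports if
 * an iobuf is re-initialized while a previous I/O is still in flight.
 */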

static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;
}

void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS + rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME + rw],
                                      ktime_to_ms(iobuf->dr_elapsed));
        }
}

#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
# ifdef HAVE_BI_STATUS
        int error = bio->bi_status;
# else
        int error = bio->bi_error;
# endif
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
        struct osd_iobuf *iobuf = bio->bi_private;
        int iter;
        struct bio_vec *bvl;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to <https://jira.hpdd.intel.com/> , along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps).  Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, "
#ifdef HAVE_BI_RW
                       "bi_rw: %lu,"
#else
                       "bi_opf: %u,"
#endif
                       "bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p,"
                       "bi_cnt: %d, bi_private: %p\n", bio->bi_next,
                        (unsigned long)bio->bi_flags,
#ifdef HAVE_BI_RW
                        bio->bi_rw,
#else
                        bio->bi_opf,
#endif
                        bio->bi_vcnt, bio_idx(bio),
                        bio_sectors(bio) << 9, bio->bi_end_io,
#ifdef HAVE_BI_CNT
                        atomic_read(&bio->bi_cnt),
#else
                        atomic_read(&bio->__bi_cnt),
#endif
                        bio->bi_private);
                return;
        }

        /* the check is outside of the loop for performance reasons -bzzz */
        if (!bio_data_dir(bio)) {
                bio_for_each_segment_all(bvl, bio, iter) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl_to_page(bvl));
                        LASSERT(PageLocked(bvl_to_page(bvl)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * set dr_elapsed before dr_numreqs turns to 0, otherwise
         * it's possible that the service thread will see dr_numreqs
         * is zero, but dr_elapsed is not set yet, leading to lost
         * data in this processing and an assertion in a subsequent
         * call to OSD.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                ktime_t now = ktime_get();

                iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
}

static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device    *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
#else
        bio->bi_opf |= rw;
        submit_bio(bio);
#endif
}
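
/*
 * Compat note (my reading of kernel history, not part of this change):
 * kernels before v4.8 take submit_bio(rw, bio), while newer kernels
 * dropped the rw argument and carry the operation in bio->bi_opf; the
 * HAVE_SUBMIT_BIO_2ARGS branches above select between the two ABIs.
 */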

static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}

static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int            blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page  **pages = iobuf->dr_pages;
        int            npages = iobuf->dr_npages;
        sector_t      *blocks = iobuf->dr_blocks;
        int            total_blocks = npages * blocks_per_page;
        int            sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int   blocksize = inode->i_sb->s_blocksize;
        struct bio    *bio = NULL;
        struct page   *page;
        unsigned int   page_offset;
        sector_t       sector;
        int            nblocks;
        int            block_idx;
        int            page_idx;
        int            i;
        int            rc = 0;
        DECLARE_PLUG(plug);
        ENTRY;

        LASSERT(iobuf->dr_npages == npages);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = ktime_get();

        blk_start_plug(&plug);
        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q = bio_get_queue(bio);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       0, queue_max_hw_segments(q));
                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio_set_dev(bio, inode->i_sb->s_bdev);
                        bio_set_sector(bio, sector);
#ifdef HAVE_BI_RW
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
#else
                        bio->bi_opf = (iobuf->dr_rw == 0) ? READ : WRITE;
#endif
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

out:
        blk_finish_plug(&plug);

        /* in order to achieve better IO throughput, we don't wait for write
         * completion here. instead we proceed with transaction commit in
         * parallel and wait for IO completion once the transaction is stopped
         * see osd_trans_stop() for more details -bzzz */
        if (iobuf->dr_rw == 0) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}

static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb)
{
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_SIZE - 1);
                int plen = PAGE_SIZE - poff;

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(0);
}
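
/*
 * Illustrative split (assuming PAGE_SIZE == 4096): a request with
 * offset = 6144 and len = 10000 produces three niobufs:
 *   lnb[0]: file_offset 6144,  page_offset 2048, len 2048
 *   lnb[1]: file_offset 8192,  page_offset 0,    len 4096
 *   lnb[2]: file_offset 12288, page_offset 0,    len 3856
 */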

static struct page *osd_get_page(struct dt_object *dt, loff_t offset,
                                 gfp_t gfp_mask)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page *page;

        LASSERT(inode);

        page = find_or_create_page(inode->i_mapping, offset >> PAGE_SHIFT,
                                   gfp_mask);

        if (unlikely(page == NULL))
                lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);

        return page;
}

/*
 * The following "locks" are involved:
 * journal_start
 * i_mutex
 * page lock
 *
 * osd write path:
 *  - lock page(s)
 *  - journal_start
 *  - truncate_sem
 *
 * ext4 vmtruncate:
 *  - lock pages, unlock
 *  - journal_start
 *  - lock partial page
 *  - i_data_sem
 *
 */

/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param lnb           array of pages undergoing IO
 * \param npages        number of pages in \a lnb
 *
 * \retval 0            always
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct pagevec pvec;
        int i;

#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
        pagevec_init(&pvec);
#else
        pagevec_init(&pvec, 0);
#endif

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_page == NULL)
                        continue;
                LASSERT(PageLocked(lnb[i].lnb_page));
                unlock_page(lnb[i].lnb_page);
                if (pagevec_add(&pvec, lnb[i].lnb_page) == 0)
                        pagevec_release(&pvec);
                dt_object_put(env, dt);
                lnb[i].lnb_page = NULL;
        }

        /* Release any partial pagevec */
        pagevec_release(&pvec);

        RETURN(0);
}

/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for the same
 * region, but a partial-page truncate can leave dirty pages in the cache
 * for ldiskfs. It's possible that the writeout on such a page is in
 * progress when we access it. It's also possible that during this writeout
 * we put new (partial) data into the page, but won't be able to proceed in
 * filter_commitrw_write(). Therefore, just wait for writeout completion;
 * it should be rare enough.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param pos           byte offset of IO start
 * \param len           number of bytes of IO
 * \param lnb           array of extents undergoing IO
 * \param rw            read or write operation, and other flags
 *
 * \retval pages        (zero or more) loaded successfully
 * \retval -ENOMEM      on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        enum dt_bufs_type rw)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int npages, i, rc = 0;
        gfp_t gfp_mask;

        LASSERT(obj->oo_inode);

        osd_map_remote_to_local(pos, len, &npages, lnb);

        /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
        gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
                                             GFP_HIGHUSER;
        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(dt, lnb->lnb_file_offset,
                                             gfp_mask);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                wait_on_page_writeback(lnb->lnb_page);
                BUG_ON(PageWriteback(lnb->lnb_page));

                lu_object_get(&dt->do_lu);
        }

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}

#ifndef HAVE_LDISKFS_MAP_BLOCKS

#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif

struct bpointers {
        sector_t *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

static long ldiskfs_ext_find_goal(struct inode *inode,
                                  struct ldiskfs_ext_path *path,
                                  unsigned long block, int *aflags)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ldiskfs_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ldiskfs_ext_pblock(ex) +
                                (block - le32_to_cpu(ex->ee_block));

                /* it looks like the index is empty;
                 * try to find a goal starting from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
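
/*
 * Illustrative arithmetic (values assumed, not from the source): with
 * 32768 blocks per group, each of the 16 "colour" slots is 2048 blocks
 * wide, so a thread with pid 1234 (1234 % 16 == 2) gets a goal offset
 * of 4096 blocks into its inode's group, spreading concurrent
 * allocators across the group.
 */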

static unsigned long new_blocks(handle_t *handle, struct inode *inode,
                                struct ldiskfs_ext_path *path,
                                unsigned long block, unsigned long *count,
                                int *err)
{
        struct ldiskfs_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbouring allocated blocks */
        ar.lleft = block;
        *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = LDISKFS_MB_HINT_DATA;
        pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}

static int ldiskfs_ext_new_extent_cb(struct inode *inode,
                                     struct ldiskfs_ext_path *path,
                                     struct ldiskfs_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                     struct ldiskfs_extent *ex,
#endif
                                     void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct ldiskfs_extent nex;
        unsigned long pblock = 0;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
        if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
#endif
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = LDISKFS_I(inode)->i_ext_generation;
        count = ldiskfs_ext_calc_credits_for_insert(inode, path);

        handle = osd_journal_start(inode, LDISKFS_HT_MISC,
                                   count + LDISKFS_ALLOC_NEEDED + 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        if (tgen != LDISKFS_I(inode)->i_ext_generation) {
                /* the tree has changed, so the path can be invalid now */
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback is not
         * protected by i_data_sem as a whole, so we patched it to store the
         * generation in the path and verify here that the tree hasn't
         * changed */
        down_write((&LDISKFS_I(inode)->i_data_sem));

        /* validate the extent, make sure the extent tree has not changed */
        if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&LDISKFS_I(inode)->i_data_sem);
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        BUG_ON(count > cex->ec_len);

        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ldiskfs_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
        if (err) {
                /* free the data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it on every free() */
                ldiskfs_discard_preallocations(inode);
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ldiskfs_free_blocks(handle, inode, NULL,
                                    ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#else
                ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * Storing the length of the extent we actually inserted asks
         * ldiskfs_ext_walk_space() to continue scanning after that block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ldiskfs_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&LDISKFS_I(inode)->i_data_sem));
        ldiskfs_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        CERROR("current extent: %u/%u/%llu %d\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start,
                                cex->ec_type);
#else
                        CERROR("current extent: %u/%u/%llu\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start);
#endif
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (pblock != 0) {
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                unmap_underlying_metadata(inode->i_sb->s_bdev,
                                                          *(bp->blocks));
#else
                                clean_bdev_aliases(inode->i_sb->s_bdev,
                                                   *(bp->blocks), 1);
#endif
                        }
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index,
                                   int clen, sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct bpointers bp;
        int err;

        if (index + clen >= inode->i_sb->s_maxbytes >> PAGE_SHIFT)
                return -EFBIG;

        bp.blocks = blocks;
        bp.start = index * blocks_per_page;
        bp.init_num = bp.num = clen * blocks_per_page;
        bp.create = create;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               bp.start, bp.start + bp.num - 1, (unsigned)inode->i_ino);

        err = ldiskfs_ext_walk_space(inode, bp.start, bp.num,
                                     ldiskfs_ext_new_extent_cb, &bp);
        ldiskfs_ext_invalidate_cache(inode);

        return err;
}

static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode,
                                          struct page **page, int pages,
                                          sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        pgoff_t bitmap_max_page_index;
        sector_t *b;
        int rc = 0, i;

        bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
                                PAGE_SHIFT;
        for (i = 0, b = blocks; i < pages; i++, page++) {
                if ((*page)->index + 1 >= bitmap_max_page_index) {
                        rc = -EFBIG;
                        break;
                }
                rc = ldiskfs_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %llu create %d: rc %d\n",
                               inode->i_ino,
                               (unsigned long long)*b, create, rc);
                        break;
                }
                b += blocks_per_page;
        }
        return rc;
}

static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode,
                                           struct page **page,
                                           int pages, sector_t *blocks,
                                           int create)
{
        int rc = 0, i = 0, clen = 0;
        struct page *fp = NULL;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are already sorted, so we just have to find
         * contiguous extents and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += clen * (PAGE_SIZE >> inode->i_blkbits);
        }

        if (fp)
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);

cleanup:
        return rc;
}
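
/*
 * Walk-through (hypothetical indices): for pages with indices
 * 0, 1, 2, 7, 8 the loop above maps two clusters, first
 * (fp->index 0, clen 3) inside the loop and then (fp->index 7, clen 2)
 * after it, advancing the blocks array by clen * blocks-per-page
 * between the two calls.
 */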

static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int rc;

        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);

        return rc;
}
#else
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;
        handle_t *handle = NULL;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        if (create) {
                create = LDISKFS_GET_BLOCKS_CREATE;
                handle = ldiskfs_journal_current_handle();
                LASSERT(handle != NULL);
                rc = osd_attach_jinode(inode);
                if (rc)
                        return rc;
        }
        /* pages are already sorted, so we just have to find
         * contiguous extents and process them properly */
        while (i < pages) {
                long blen, total = 0;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
cont_map:
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;
                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                } else {
                                        *(blocks + total) = map.m_pblk + c;
                                        /* unmap any possible underlying
                                         * metadata from the block device
                                         * mapping.  bug 6998. */
                                        if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                            create)
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                                unmap_underlying_metadata(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c);
#else
                                                clean_bdev_aliases(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c, 1);
#endif
                                }
                        }
                        rc = 0;
                }
                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        return rc;
}
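
/*
 * Mapping sketch (hypothetical numbers): for a cluster of two 4KB pages
 * with 4KB blocks, blen == 2. If ldiskfs_map_blocks() maps only the
 * first block (rc == 1), the loop above stores that pblock, then the
 * cont_map path retries with m_lblk advanced by total and
 * m_len = blen - total, until all blen blocks are either mapped or,
 * for reads, recorded as holes (block number 0).
 */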
#endif /* HAVE_LDISKFS_MAP_BLOCKS */

static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        ktime_t start;
        ktime_t end;
        s64 timediff;
        ssize_t                 isize;
        __s64                   maxidx;
        int                     rc = 0;
        int                     i;
        int                     cache = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        start = ktime_get();
        for (i = 0; i < npages; i++) {

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);

                /*
                 * till the transaction commits, the content of the page is
                 * undefined; we'll set it uptodate once the bulk transfer is
                 * done, otherwise subsequent reads could access unstable data
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_SIZE)
                        continue;

                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}

struct osd_fextent {
        sector_t        start;
        sector_t        end;
        unsigned int    mapped:1;
};

static int osd_is_mapped(struct dt_object *dt, __u64 offset,
                         struct osd_fextent *cached_extent)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        sector_t block = offset >> inode->i_blkbits;
        sector_t start;
        struct fiemap_extent_info fei = { 0 };
        struct fiemap_extent fe = { 0 };
        mm_segment_t saved_fs;
        int rc;

        if (block >= cached_extent->start && block < cached_extent->end)
                return cached_extent->mapped;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
                return 0;

        fei.fi_extents_max = 1;
        fei.fi_extents_start = &fe;

        saved_fs = get_fs();
        set_fs(get_ds());
        rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
        set_fs(saved_fs);
        if (rc != 0)
                return 0;

        start = fe.fe_logical >> inode->i_blkbits;

        if (start > block) {
                cached_extent->start = block;
                cached_extent->end = start;
                cached_extent->mapped = 0;
        } else {
                cached_extent->start = start;
                cached_extent->end = (fe.fe_logical + fe.fe_length) >>
                                      inode->i_blkbits;
                cached_extent->mapped = 1;
        }

        return cached_extent->mapped;
}
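
/*
 * Caching sketch (hypothetical numbers): asking about block 10 when
 * fiemap reports the next extent starting at logical block 12 caches
 * [10, 12) as a hole, so further queries in that range return
 * "unmapped" without another fiemap call; a query landing inside a
 * returned extent caches its full [start, end) range as mapped instead.
 */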

static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        int                     extents = 1;
        int                     depth;
        int                     i;
        int                     newblocks;
        int                     rc = 0;
        int                     flags = 0;
        int                     credits = 0;
        long long               quota_space = 0;
        struct osd_fextent      extent = { 0 };
        enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        newblocks = npages;

        /* calculate the number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                if (i && lnb[i].lnb_file_offset !=
                    lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
                        extents++;

                if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
                        lnb[i].lnb_flags |= OBD_BRW_MAPPED;
                else
                        quota_space += PAGE_SIZE;

                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the checking
                 * for whether a page is from cache can be simplified as:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on a per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        declare_flags |= OSD_QID_FORCE;
        }

        /*
         * each extent can go into a new leaf, causing a split
         * 5 is the max tree depth: inode + 4 index blocks
         * with blockmaps, depth is 3 at most
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * many concurrent threads may grow the tree by the time
                 * our transaction starts, so consider 2 the minimum depth
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * extents;
        }

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);

        /* each new block can go in a different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                credits += newblocks;

        /* we can't dirty more gd blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                credits += newblocks;

        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* make sure the over quota flags were not set */
        lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), quota_space, oh,
                                   osd_dt_obj(dt), &flags, declare_flags);

        /* we only need to store the overquota flags in the first lnb for
         * now; once we support multi-object BRW, this code needs to be
         * revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
        if (flags & QUOTA_FL_OVER_PRJQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;

        RETURN(rc);
}
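
/*
 * Credit arithmetic (illustrative, assuming an extent-mapped inode with
 * ext_depth() == 1 and a single contiguous 256-page write): extents = 1,
 * depth = max(1, 1) + 1 = 2, newblocks = 256 + 2 = 258, so
 * credits = 1 (inode) + 2 * 2 * 1 (leaf splits) +
 * min(258, s_groups_count) bitmap blocks +
 * min(258, s_gdb_count) group descriptor blocks.
 */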

static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *thandle)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
        loff_t isize;
        int rc = 0, i;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 1, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        ll_vfs_dq_init(inode);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_rc == -ENOSPC &&
                    (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
                        /* Allow the write to proceed if overwriting an
                         * existing block */
                        lnb[i].lnb_rc = 0;
                }

                if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
                               lnb[i].lnb_rc);
                        LASSERT(lnb[i].lnb_page);
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                        continue;
                }

                LASSERT(PageLocked(lnb[i].lnb_page));
                LASSERT(!PageWriteback(lnb[i].lnb_page));

                if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
                        isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;

                /*
                 * Since write and truncate are serialized by oo_sem, even
                 * partial-page truncate should not leave dirty pages in the
                 * page cache.
                 */
                LASSERT(!PageDirty(lnb[i].lnb_page));

                SetPageUptodate(lnb[i].lnb_page);

                osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
        }

        osd_trans_exec_op(env, thandle, OSD_OT_WRITE);

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 1);
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
        }

        if (likely(rc == 0)) {
                spin_lock(&inode->i_lock);
                if (isize > i_size_read(inode)) {
                        i_size_write(inode, isize);
                        LDISKFS_I(inode)->i_disksize = isize;
                        spin_unlock(&inode->i_lock);
                        ll_dirty_inode(inode, I_DIRTY_DATASYNC);
                } else {
                        spin_unlock(&inode->i_lock);
                }

                rc = osd_do_bio(osd, inode, iobuf);
                /* we don't do stats here as in the read path because
                 * write is async: we'll do this in osd_bufs_put() */
        } else {
                osd_fini_iobuf(osd, iobuf);
        }

        osd_trans_exec_check(env, thandle, OSD_OT_WRITE);

        if (unlikely(rc != 0)) {
                /* if the write fails, we should drop the pages from the
                 * cache */
                for (i = 0; i < npages; i++) {
                        if (lnb[i].lnb_page == NULL)
                                continue;
                        LASSERT(PageLocked(lnb[i].lnb_page));
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                }
        }

        RETURN(rc);
}
1372
1373 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1374                          struct niobuf_local *lnb, int npages)
1375 {
1376         struct osd_thread_info *oti = osd_oti_get(env);
1377         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1378         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1379         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1380         int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
1381         ktime_t start, end;
1382         s64 timediff;
1383         loff_t isize;
1384
1385         LASSERT(inode);
1386
1387         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1388         if (unlikely(rc != 0))
1389                 RETURN(rc);
1390
1391         isize = i_size_read(inode);
1392
1393         if (osd->od_read_cache)
1394                 cache = 1;
1395         if (isize > osd->od_readcache_max_filesize)
1396                 cache = 0;
1397
1398         start = ktime_get();
1399         for (i = 0; i < npages; i++) {
1400
1401                 if (isize <= lnb[i].lnb_file_offset)
1402                         /* If there's no more data, abort early.
1403                          * lnb->lnb_rc == 0, so it's easy to detect later. */
1404                         break;
1405
1406                 if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len)
1407                         lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
1408                 else
1409                         lnb[i].lnb_rc = lnb[i].lnb_len;
1410
1411                 /* Bypass disk read if fail_loc is set properly */
1412                 if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
1413                         SetPageUptodate(lnb[i].lnb_page);
1414
1415                 if (PageUptodate(lnb[i].lnb_page)) {
1416                         cache_hits++;
1417                 } else {
1418                         cache_misses++;
1419                         osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
1420                 }
1421
1422                 if (cache == 0)
1423                         generic_error_remove_page(inode->i_mapping,
1424                                                   lnb[i].lnb_page);
1425         }
1426         end = ktime_get();
1427         timediff = ktime_us_delta(end, start);
1428         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1429
1430         if (cache_hits != 0)
1431                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1432                                     cache_hits);
1433         if (cache_misses != 0)
1434                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1435                                     cache_misses);
1436         if (cache_hits + cache_misses != 0)
1437                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1438                                     cache_hits + cache_misses);
1439
1440         if (iobuf->dr_npages) {
1441                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1442                                                  iobuf->dr_npages,
1443                                                  iobuf->dr_blocks, 0);
1444                 if (likely(rc == 0))
1445                         rc = osd_do_bio(osd, inode, iobuf);
1446                 /* IO stats will be done in osd_bufs_put() */
1447         }
1448
1449         RETURN(rc);
1450 }
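
/*
 * Editor's note (illustrative, not part of the original code): with
 * od_read_cache enabled and, say, od_readcache_max_filesize = 32MB, a
 * read from a 64MB file runs the loop above with cache = 0, so every
 * page is removed from the inode's mapping via generic_error_remove_page()
 * and will not be found by later reads; a read from a 1MB file keeps its
 * pages cached, and re-reads of them count as LPROC_OSD_CACHE_HIT.
 */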
1451
1452 /*
1453  * XXX: Another layering violation for now.
1454  *
1455  * We don't want to use the ->f_op->read/->f_op->write methods, because
1456  * the generic file read/write paths
1457  *
1458  *         - serialize on ->i_sem, and
1459  *         - do a lot of extra work like balance_dirty_pages(),
1460  *
1461  * which doesn't work for globally shared files like /last_rcvd.
1462  */
1463 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1464 {
1465         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1466
1467         memcpy(buffer, (char *)ei->i_data, buflen);
1468
1469         return buflen;
1470 }
1471
1472 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1473 {
1474         struct buffer_head *bh;
1475         unsigned long block;
1476         int osize;
1477         int blocksize;
1478         int csize;
1479         int boffs;
1480
1481         /* prevent reading after eof */
1482         spin_lock(&inode->i_lock);
1483         if (i_size_read(inode) < *offs + size) {
1484                 loff_t diff = i_size_read(inode) - *offs;
1485                 spin_unlock(&inode->i_lock);
1486                 if (diff < 0) {
1487                         CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
1488                                i_size_read(inode), *offs);
1489                         return -EBADR;
1490                 } else if (diff == 0) {
1491                         return 0;
1492                 } else {
1493                         size = diff;
1494                 }
1495         } else {
1496                 spin_unlock(&inode->i_lock);
1497         }
1498
1499         blocksize = 1 << inode->i_blkbits;
1500         osize = size;
1501         while (size > 0) {
1502                 block = *offs >> inode->i_blkbits;
1503                 boffs = *offs & (blocksize - 1);
1504                 csize = min(blocksize - boffs, size);
1505                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1506                 if (IS_ERR(bh)) {
1507                         CERROR("%s: can't read %u@%llu on ino %lu: "
1508                                "rc = %ld\n", osd_ino2name(inode),
1509                                csize, *offs, inode->i_ino,
1510                                PTR_ERR(bh));
1511                         return PTR_ERR(bh);
1512                 }
1513
1514                 if (bh != NULL) {
1515                         memcpy(buf, bh->b_data + boffs, csize);
1516                         brelse(bh);
1517                 } else {
1518                         memset(buf, 0, csize);
1519                 }
1520
1521                 *offs += csize;
1522                 buf += csize;
1523                 size -= csize;
1524         }
1525         return osize;
1526 }
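
/*
 * Editor's note (worked example, assuming a 4096-byte block size): a call
 * with *offs = 5000 and size = 6000 iterates as
 *
 *     block = 5000 >> 12 = 1, boffs = 5000 & 4095 = 904,
 *     csize = min(4096 - 904, 6000) = 3192    (copied from block 1)
 *     block = 8192 >> 12 = 2, boffs = 0,
 *     csize = min(4096, 2808) = 2808          (copied from block 2)
 *
 * so two buffer_heads are read and osize = 6000 is returned; a hole
 * (NULL bh) reaches the caller as zeroes via memset().
 */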
1527
1528 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1529                         struct lu_buf *buf, loff_t *pos)
1530 {
1531         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1532         int           rc;
1533
1534         /* Read small symlink from inode body as we need to maintain correct
1535          * on-disk symlinks for ldiskfs.
1536          */
1537         if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1538             (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1539                 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1540         else
1541                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1542
1543         return rc;
1544 }
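
/*
 * Editor's note (hedged): sizeof(LDISKFS_I(inode)->i_data) is the inode's
 * inline block-pointer array (15 32-bit slots, i.e. 60 bytes on
 * ldiskfs/ext4), so a symlink target shorter than that is read directly
 * from the inode body (a "fast symlink"), while longer targets go through
 * osd_ldiskfs_read() against allocated blocks.
 */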
1545
1546 static inline int osd_extents_enabled(struct super_block *sb,
1547                                       struct inode *inode)
1548 {
1549         if (inode != NULL) {
1550                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1551                         return 1;
1552         } else if (ldiskfs_has_feature_extents(sb)) {
1553                 return 1;
1554         }
1555         return 0;
1556 }
1557
1558 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1559                            const loff_t size, const loff_t pos,
1560                            const int blocks)
1561 {
1562         int credits, bits, bs, i;
1563
1564         bits = sb->s_blocksize_bits;
1565         bs = 1 << bits;
1566
1567         /* legacy block map: 3 levels * 3 (bitmap, gd, block itself);
1568          * we do not expect block-mapped files to be large, so let's
1569          * shrink it to 2 levels, which covers files up to ~4GB */
1570
1571         /* this is default reservation: 2 levels */
1572         credits = (blocks + 2) * 3;
1573
1574         /* actual offset is unknown, hard to optimize */
1575         if (pos == -1)
1576                 return credits;
1577
1578         /* now check for few specific cases to optimize */
1579         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1580                 /* no indirects */
1581                 credits = blocks;
1582                 /* allocate if not allocated */
1583                 if (inode == NULL) {
1584                         credits += blocks * 2;
1585                         return credits;
1586                 }
1587                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1588                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1589                         if (LDISKFS_I(inode)->i_data[i] == 0)
1590                                 credits += 2;
1591                 }
1592         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1593                 /* single indirect (1024 entries assumes 4KB blocks) */
1594                 credits = blocks * 3;
1595                 if (inode == NULL ||
1596                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1597                         credits += 3;
1598                 else
1599                         /* The indirect block may be modified. */
1600                         credits += 1;
1601         }
1602
1603         return credits;
1604 }
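
/*
 * Editor's note (worked example, bs = 4096): a 2-block write at pos = 0
 * into an existing inode whose i_data[0..1] are already allocated hits
 * the "no indirects" case, so credits = blocks = 2; if both slots were
 * unallocated, each would add 2 (block bitmap + group descriptor),
 * giving 2 + 2 * 2 = 6.  With the offset unknown (pos == -1) the
 * conservative default (blocks + 2) * 3 = 12 is returned instead.
 */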
1605
1606 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1607                                  const struct lu_buf *buf, loff_t _pos,
1608                                  struct thandle *handle)
1609 {
1610         struct osd_object  *obj  = osd_dt_obj(dt);
1611         struct inode       *inode = obj->oo_inode;
1612         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1613         struct osd_thandle *oh;
1614         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1615         int                 bits, bs;
1616         int                 depth, size;
1617         loff_t              pos;
1618         ENTRY;
1619
1620         LASSERT(buf != NULL);
1621         LASSERT(handle != NULL);
1622
1623         oh = container_of0(handle, struct osd_thandle, ot_super);
1624         LASSERT(oh->ot_handle == NULL);
1625
1626         size = buf->lb_len;
1627         bits = sb->s_blocksize_bits;
1628         bs = 1 << bits;
1629
1630         if (_pos == -1) {
1631                 /* if this is an append, then we
1632                  * should expect a cross-block record */
1633                 pos = 0;
1634         } else {
1635                 pos = _pos;
1636         }
1637
1638         /* blocks to modify */
1639         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1640         LASSERT(blocks > 0);
1641
1642         if (inode != NULL && _pos != -1) {
1643                 /* object size in blocks */
1644                 est = (i_size_read(inode) + bs - 1) >> bits;
1645                 allocated = inode->i_blocks >> (bits - 9);
1646                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1647                         /* looks like an overwrite, no need to modify tree */
1648                         credits = blocks;
1649                         /* no need to modify i_size */
1650                         goto out;
1651                 }
1652         }
1653
1654         if (osd_extents_enabled(sb, inode)) {
1655                 /*
1656                  * many concurrent threads may grow the tree by the time
1657                  * our transaction starts, so consider 2 a minimum depth.
1658                  * for every level we may need to allocate a new block
1659                  * and take some entries from the old one: 3 blocks to
1660                  * allocate (bitmap, gd, the block itself) + the old
1661                  * block = 4 per level.
1662                  */
1663                 depth = inode != NULL ? ext_depth(inode) : 0;
1664                 depth = max(depth, 1) + 1;
1665                 credits = depth;
1666                 /* if not an append, then a split may need to modify
1667                  * existing blocks, moving entries into the new ones */
1668                 if (_pos != -1)
1669                         credits += depth;
1670                 /* blocks to store data: bitmap,gd,itself */
1671                 credits += blocks * 3;
1672         } else {
1673                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1674         }
1675         /* if the inode is created as part of this transaction, then its
1676          * update is counted already by the creation method */
1677         if (inode != NULL)
1678                 credits++;
1679
1680 out:
1681
1682         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1683
1684         /* dt_declare_write() is usually called for system objects, such
1685          * as llog or last_rcvd files. We needn't enforce quota on those
1686          * objects, so always set lqi_space to 0. */
1687         if (inode != NULL)
1688                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1689                                            i_gid_read(inode),
1690                                            i_projid_read(inode), 0,
1691                                            oh, obj, NULL, OSD_QID_BLK);
1692         RETURN(rc);
1693 }
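
/*
 * Editor's note (worked example for the extent branch above): an
 * 8000-byte append (_pos == -1, so pos = 0) to an extent-mapped inode
 * with ext_depth() == 1 gives blocks = ((0 + 8000 + 4095) >> 12) = 2,
 * depth = max(1, 1) + 1 = 2, credits = 2 for possible tree growth, no
 * extra depth since this is an append, plus 2 * 3 = 6 for the data
 * blocks and 1 for the inode itself: 9 credits declared for
 * OSD_OT_WRITE, before the quota declaration below.
 */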
1694
1695 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1696 {
1697         /* LU-2634: clear the extent format for fast symlink */
1698         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1699
1700         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1701         spin_lock(&inode->i_lock);
1702         LDISKFS_I(inode)->i_disksize = buflen;
1703         i_size_write(inode, buflen);
1704         spin_unlock(&inode->i_lock);
1705         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1706
1707         return 0;
1708 }
1709
1710 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
1711                              int write_NUL, loff_t *offs, handle_t *handle)
1712 {
1713         struct buffer_head *bh        = NULL;
1714         loff_t              offset    = *offs;
1715         loff_t              new_size  = i_size_read(inode);
1716         unsigned long       block;
1717         int                 blocksize = 1 << inode->i_blkbits;
1718         int                 err = 0;
1719         int                 size;
1720         int                 boffs;
1721         int                 dirty_inode = 0;
1722
1723         if (write_NUL) {
1724                 /*
1725                  * long symlink write does not count the NUL terminator in
1726                  * bufsize, we write it, and the inode's file size does not
1727                  * count the NUL terminator as well.
1728                  */
1729                 ((char *)buf)[bufsize] = '\0';
1730                 ++bufsize;
1731         }
1732
1733         while (bufsize > 0) {
1734                 int credits = handle->h_buffer_credits;
1735
1736                 if (bh)
1737                         brelse(bh);
1738
1739                 block = offset >> inode->i_blkbits;
1740                 boffs = offset & (blocksize - 1);
1741                 size = min(blocksize - boffs, bufsize);
1742                 bh = __ldiskfs_bread(handle, inode, block, 1);
1743                 if (IS_ERR_OR_NULL(bh)) {
1744                         if (bh == NULL) {
1745                                 err = -EIO;
1746                         } else {
1747                                 err = PTR_ERR(bh);
1748                                 bh = NULL;
1749                         }
1750
1751                         CERROR("%s: error reading offset %llu (block %lu, "
1752                                "size %d, offs %llu), credits %d/%d: rc = %d\n",
1753                                inode->i_sb->s_id, offset, block, bufsize, *offs,
1754                                credits, handle->h_buffer_credits, err);
1755                         break;
1756                 }
1757
1758                 err = ldiskfs_journal_get_write_access(handle, bh);
1759                 if (err) {
1760                         CERROR("journal_get_write_access() returned error %d\n",
1761                                err);
1762                         break;
1763                 }
1764                 LASSERTF(boffs + size <= bh->b_size,
1765                          "boffs %d size %d bh->b_size %lu\n",
1766                          boffs, size, (unsigned long)bh->b_size);
1767                 memcpy(bh->b_data + boffs, buf, size);
1768                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
1769                 if (err)
1770                         break;
1771
1772                 if (offset + size > new_size)
1773                         new_size = offset + size;
1774                 offset += size;
1775                 bufsize -= size;
1776                 buf += size;
1777         }
1778         if (bh)
1779                 brelse(bh);
1780
1781         if (write_NUL)
1782                 --new_size;
1783         /* correct in-core and on-disk sizes */
1784         if (new_size > i_size_read(inode)) {
1785                 spin_lock(&inode->i_lock);
1786                 if (new_size > i_size_read(inode))
1787                         i_size_write(inode, new_size);
1788                 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
1789                         LDISKFS_I(inode)->i_disksize = i_size_read(inode);
1790                         dirty_inode = 1;
1791                 }
1792                 spin_unlock(&inode->i_lock);
1793                 if (dirty_inode)
1794                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1795         }
1796
1797         if (err == 0)
1798                 *offs = offset;
1799         return err;
1800 }
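
/*
 * Editor's note (illustrative): for a long-symlink write of a 10-byte
 * target, write_NUL is set, so 11 bytes (including the appended '\0')
 * are copied into the block, but new_size is decremented again at the
 * end, and the inode size is recorded as 10; symlink i_size does not
 * include the terminator.
 */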
1801
1802 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1803                          const struct lu_buf *buf, loff_t *pos,
1804                          struct thandle *handle, int ignore_quota)
1805 {
1806         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1807         struct osd_thandle      *oh;
1808         ssize_t                 result;
1809         int                     is_link;
1810
1811         LASSERT(dt_object_exists(dt));
1812
1813         LASSERT(handle != NULL);
1814         LASSERT(inode != NULL);
1815         ll_vfs_dq_init(inode);
1816
1817         /* XXX: one declared write chunk can be used many times, so the
1818          * osd_trans_exec_op() credit accounting below is only approximate */
1819
1820         oh = container_of(handle, struct osd_thandle, ot_super);
1821         LASSERT(oh->ot_handle->h_transaction != NULL);
1822         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
1823
1824         /* Write small symlink to inode body as we need to maintain correct
1825          * on-disk symlinks for ldiskfs.
1826          * Note: buf->lb_buf contains a NUL terminator while buf->lb_len
1827          * does not count it.
1828          */
1829         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1830         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1831                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1832         else
1833                 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
1834                                                   buf->lb_len, is_link, pos,
1835                                                   oh->ot_handle);
1836         if (result == 0)
1837                 result = buf->lb_len;
1838
1839         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
1840
1841         return result;
1842 }
1843
1844 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1845                              __u64 start, __u64 end, struct thandle *th)
1846 {
1847         struct osd_thandle *oh;
1848         struct inode       *inode;
1849         int                 rc;
1850         ENTRY;
1851
1852         LASSERT(th);
1853         oh = container_of(th, struct osd_thandle, ot_super);
1854
1855         /*
1856          * we don't reserve credits for the whole truncate: that is not
1857          * possible, as truncate may need to free too many blocks for a
1858          * single transaction to hold. instead we reserve credits to
1859          * change i_size and to put the inode onto the orphan list;
1860          * if needed, truncate will extend or restart the transaction
1861          * on its own
1862          */
1863         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1864                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1865
1866         inode = osd_dt_obj(dt)->oo_inode;
1867         LASSERT(inode);
1868
1869         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1870                                    i_projid_read(inode), 0, oh, osd_dt_obj(dt),
1871                                    NULL, OSD_QID_BLK);
1872         RETURN(rc);
1873 }
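
/*
 * Editor's note (back-of-envelope, assumed numbers): truncating a 1TB
 * file with 4KB blocks frees ~268M blocks; at even a few credits per
 * freed block, that is far more than a typical journal (say 1GB, about
 * 256K blocks) can hold in one transaction, hence only the i_size and
 * orphan-list updates are declared here.
 */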
1874
1875 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1876                      __u64 start, __u64 end, struct thandle *th)
1877 {
1878         struct osd_thandle *oh;
1879         struct osd_object  *obj = osd_dt_obj(dt);
1880         struct inode       *inode = obj->oo_inode;
1881         handle_t           *h;
1882         tid_t               tid;
1883         int                rc = 0, rc2 = 0;
1884         ENTRY;
1885
1886         LASSERT(end == OBD_OBJECT_EOF);
1887         LASSERT(dt_object_exists(dt));
1888         LASSERT(osd_invariant(obj));
1889         LASSERT(inode != NULL);
1890         ll_vfs_dq_init(inode);
1891
1892         LASSERT(th);
1893         oh = container_of(th, struct osd_thandle, ot_super);
1894         LASSERT(oh->ot_handle->h_transaction != NULL);
1895
1896         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
1897
1898         tid = oh->ot_handle->h_transaction->t_tid;
1899
1900         spin_lock(&inode->i_lock);
1901         i_size_write(inode, start);
1902         spin_unlock(&inode->i_lock);
1903         ll_truncate_pagecache(inode, start);
1904 #ifdef HAVE_INODEOPS_TRUNCATE
1905         if (inode->i_op->truncate) {
1906                 inode->i_op->truncate(inode);
1907         } else
1908 #endif
1909                 ldiskfs_truncate(inode);
1910
1911         /*
1912          * For a partial-page truncate, flush the page to disk immediately to
1913          * avoid data corruption during direct disk write.  b=17397
1914          */
1915         if ((start & ~PAGE_MASK) != 0)
1916                 rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
1917
1918         h = journal_current_handle();
1919         LASSERT(h != NULL);
1920         LASSERT(h == oh->ot_handle);
1921
1922         /* do not check credits with osd_trans_exec_check(), as truncate
1923          * can restart the transaction internally and we then restart it
1924          * below with our own credits */
1925
1926         if (tid != h->h_transaction->t_tid) {
1927                 int credits = oh->ot_credits;
1928                 /*
1929                  * transaction has changed during truncate
1930                  * the transaction has changed during truncate;
1931                  * we need to restart the handle with our credits
1932                 if (h->h_buffer_credits < credits) {
1933                         if (ldiskfs_journal_extend(h, credits))
1934                                 rc2 = ldiskfs_journal_restart(h, credits);
1935                 }
1936         }
1937
1938         RETURN(rc == 0 ? rc2 : rc);
1939 }
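
/*
 * Editor's note (hedged, based on jbd2 semantics): in the restart logic
 * above, ldiskfs_journal_extend() returns 0 when the running handle can
 * simply be grown by the requested credits; a non-zero return means the
 * transaction cannot grow (or an error occurred), and
 * ldiskfs_journal_restart() then commits the running transaction and
 * opens a fresh handle with the credits this thandle originally declared.
 */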
1940
1941 static int fiemap_check_ranges(struct inode *inode,
1942                                u64 start, u64 len, u64 *new_len)
1943 {
1944         loff_t maxbytes;
1945
1946         *new_len = len;
1947
1948         if (len == 0)
1949                 return -EINVAL;
1950
1951         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
1952                 maxbytes = inode->i_sb->s_maxbytes;
1953         else
1954                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
1955
1956         if (start > maxbytes)
1957                 return -EFBIG;
1958
1959         /*
1960          * Shrink request scope to what the fs can actually handle.
1961          */
1962         if (len > maxbytes || (maxbytes - len) < start)
1963                 *new_len = maxbytes - start;
1964
1965         return 0;
1966 }
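
/*
 * Editor's note (illustrative): with s_maxbytes of, say, 2^44 bytes, a
 * request of start = 2^40 and len = ~0ULL trips the "len > maxbytes"
 * clamp, so *new_len becomes maxbytes - start and the fiemap call never
 * walks past what the filesystem can address; start beyond maxbytes is
 * rejected outright with -EFBIG.
 */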
1967
1968 /* So that the fiemap access checks can't overflow on 32 bit machines. */
1969 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
1970
1971 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
1972                           struct fiemap *fm)
1973 {
1974         struct fiemap_extent_info fieinfo = {0, };
1975         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1976         u64 len;
1977         int rc;
1978
1980         LASSERT(inode);
1981         if (inode->i_op->fiemap == NULL)
1982                 return -EOPNOTSUPP;
1983
1984         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
1985                 return -EINVAL;
1986
1987         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
1988         if (rc)
1989                 return rc;
1990
1991         fieinfo.fi_flags = fm->fm_flags;
1992         fieinfo.fi_extents_max = fm->fm_extent_count;
1993         fieinfo.fi_extents_start = fm->fm_extents;
1994
1995         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
1996                 filemap_write_and_wait(inode->i_mapping);
1997
1998         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
1999         fm->fm_flags = fieinfo.fi_flags;
2000         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
2001
2002         return rc;
2003 }
2004
2005 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
2006                        __u64 start, __u64 end, enum lu_ladvise_type advice)
2007 {
2008         int              rc = 0;
2009         struct inode    *inode = osd_dt_obj(dt)->oo_inode;
2010         ENTRY;
2011
2012         switch (advice) {
2013         case LU_LADVISE_DONTNEED:
2014                 if (end == 0)
2015                         break;
2016                 invalidate_mapping_pages(inode->i_mapping,
2017                                          start >> PAGE_SHIFT,
2018                                          (end - 1) >> PAGE_SHIFT);
2019                 break;
2020         default:
2021                 rc = -ENOTSUPP;
2022                 break;
2023         }
2024
2025         RETURN(rc);
2026 }
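
/*
 * Editor's note (illustrative): for LU_LADVISE_DONTNEED with
 * start = 4096 and end = 16384 on 4KB pages, the call above drops page
 * indexes 1 through (16384 - 1) >> 12 = 3, i.e. exactly the pages
 * covered by [4096, 16384).
 */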
2027
2028 /*
2029  * in some cases we may need declare methods for objects being created
2030  * e.g., when we create symlink
2031  */
2032 const struct dt_body_operations osd_body_ops_new = {
2033         .dbo_declare_write = osd_declare_write,
2034 };
2035
2036 const struct dt_body_operations osd_body_ops = {
2037         .dbo_read                       = osd_read,
2038         .dbo_declare_write              = osd_declare_write,
2039         .dbo_write                      = osd_write,
2040         .dbo_bufs_get                   = osd_bufs_get,
2041         .dbo_bufs_put                   = osd_bufs_put,
2042         .dbo_write_prep                 = osd_write_prep,
2043         .dbo_declare_write_commit       = osd_declare_write_commit,
2044         .dbo_write_commit               = osd_write_commit,
2045         .dbo_read_prep                  = osd_read_prep,
2046         .dbo_declare_punch              = osd_declare_punch,
2047         .dbo_punch                      = osd_punch,
2048         .dbo_fiemap_get                 = osd_fiemap_get,
2049         .dbo_ladvise                    = osd_ladvise,
2050 };