1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/osd/osd_io.c
32  *
33  * body operations
34  *
35  * Author: Nikita Danilov <nikita@clusterfs.com>
36  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
37  *
38  */
39
40 #define DEBUG_SUBSYSTEM S_OSD
41
42 /* prerequisite for linux/xattr.h */
43 #include <linux/types.h>
44 /* prerequisite for linux/xattr.h */
45 #include <linux/fs.h>
46 #include <linux/mm.h>
47 #include <linux/pagevec.h>
48
49 /*
50  * struct OBD_{ALLOC,FREE}*()
51  * OBD_FAIL_CHECK
52  */
53 #include <obd_support.h>
54
55 #include "osd_internal.h"
56
57 /* ext_depth() */
58 #include <ldiskfs/ldiskfs_extents.h>
59
60 static inline bool osd_use_page_cache(struct osd_device *d)
61 {
62         /* do not use pagecache if write and read caching are disabled */
63         if (d->od_writethrough_cache + d->od_read_cache == 0)
64                 return false;
65         /* use pagecache by default */
66         return true;
67 }
68
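/*
 * (Re)initialize an osd_iobuf for up to @pages pages of @rw I/O.
 * The page, block and lnb arrays are grown on demand in powers of two
 * and kept for reuse across requests; returns -ENOMEM if any of the
 * reallocations fail.
 */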
69 static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
70                             int rw, int line, int pages)
71 {
72         int blocks, i;
73
74         LASSERTF(iobuf->dr_elapsed_valid == 0,
75                  "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
76                  atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
77                  iobuf->dr_init_at);
78         LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
79
80         init_waitqueue_head(&iobuf->dr_wait);
81         atomic_set(&iobuf->dr_numreqs, 0);
82         iobuf->dr_npages = 0;
83         iobuf->dr_error = 0;
84         iobuf->dr_dev = d;
85         iobuf->dr_frags = 0;
86         iobuf->dr_elapsed = ktime_set(0, 0);
87         /* must be counted before, so assert */
88         iobuf->dr_rw = rw;
89         iobuf->dr_init_at = line;
90
91         blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
92         if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
93                 LASSERT(iobuf->dr_pg_buf.lb_len >=
94                         pages * sizeof(iobuf->dr_pages[0]));
95                 return 0;
96         }
97
98         /* start with 1MB for 4K blocks */
99         i = 256;
100         while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
101                 i <<= 1;
102
103         CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
104                (unsigned int)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
105         pages = i;
106         blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
107         iobuf->dr_max_pages = 0;
108         CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
109                (unsigned int)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
110
111         lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
112         iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
113         if (unlikely(iobuf->dr_blocks == NULL))
114                 return -ENOMEM;
115
116         lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
117         iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
118         if (unlikely(iobuf->dr_pages == NULL))
119                 return -ENOMEM;
120
121         lu_buf_realloc(&iobuf->dr_lnb_buf,
122                        pages * sizeof(iobuf->dr_lnbs[0]));
123         iobuf->dr_lnbs = iobuf->dr_lnb_buf.lb_buf;
124         if (unlikely(iobuf->dr_lnbs == NULL))
125                 return -ENOMEM;
126
127         iobuf->dr_max_pages = pages;
128
129         return 0;
130 }
131 #define osd_init_iobuf(dev, iobuf, rw, pages) \
132         __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
133
134 static void osd_iobuf_add_page(struct osd_iobuf *iobuf,
135                                struct niobuf_local *lnb)
136 {
137         LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
138         iobuf->dr_pages[iobuf->dr_npages] = lnb->lnb_page;
139         iobuf->dr_lnbs[iobuf->dr_npages] = lnb;
140         iobuf->dr_npages++;
141 }
142
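/*
 * Flush per-request statistics once all bios of the iobuf have
 * completed: the number of fragments and the elapsed I/O time recorded
 * by the completion handler are fed into the device's brw_stats
 * histograms.
 */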
143 void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
144 {
145         int rw = iobuf->dr_rw;
146
147         if (iobuf->dr_elapsed_valid) {
148                 iobuf->dr_elapsed_valid = 0;
149                 LASSERT(iobuf->dr_dev == d);
150                 LASSERT(iobuf->dr_frags > 0);
151                 lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS+rw],
152                                  iobuf->dr_frags);
153                 lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
154                                       ktime_to_ms(iobuf->dr_elapsed));
155         }
156 }
157
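/*
 * bio completion callback, possibly called in IRQ context: marks read
 * pages uptodate, records the first error seen, and wakes up the
 * submitting thread once the last outstanding bio of the iobuf has
 * completed.
 */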
158 #ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
159 static void dio_complete_routine(struct bio *bio)
160 {
161         int error = blk_status_to_errno(bio->bi_status);
162 #else
163 static void dio_complete_routine(struct bio *bio, int error)
164 {
165 #endif
166         struct osd_iobuf *iobuf = bio->bi_private;
167         struct bio_vec *bvl;
168
169         /* CAVEAT EMPTOR: possibly in IRQ context
170          * DO NOT record procfs stats here!!!
171          */
172
173         if (unlikely(iobuf == NULL)) {
174                 CERROR("***** bio->bi_private is NULL!  This should never happen.  Normally, I would crash here, but instead I will dump the bio contents to the console.  Please report this to <https://jira.whamcloud.com/> , along with any interesting messages leading up to this point (like SCSI errors, perhaps).  Because bi_private is NULL, I can't wake up the thread that initiated this IO - you will probably have to reboot this node.\n");
175                 CERROR("bi_next: %p, bi_flags: %lx, " __stringify(bi_opf)
176                        ": %x, bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, bi_private: %p\n",
177                        bio->bi_next, (unsigned long)bio->bi_flags,
178                        (unsigned int)bio->bi_opf, bio->bi_vcnt, bio_idx(bio),
179                        bio_sectors(bio) << 9, bio->bi_end_io,
180                        atomic_read(&bio->__bi_cnt),
181                        bio->bi_private);
182                 return;
183         }
184
185         /* the check is outside of the loop for performance reasons -bzzz */
186         if (!bio_data_dir(bio)) {
187                 DECLARE_BVEC_ITER_ALL(iter_all);
188
189                 bio_for_each_segment_all(bvl, bio, iter_all) {
190                         if (likely(error == 0))
191                                 SetPageUptodate(bvl_to_page(bvl));
192                         LASSERT(PageLocked(bvl_to_page(bvl)));
193                 }
194                 atomic_dec(&iobuf->dr_dev->od_r_in_flight);
195         } else {
196                 atomic_dec(&iobuf->dr_dev->od_w_in_flight);
197         }
198
199         /* any real error is good enough -bzzz */
200         if (error != 0 && iobuf->dr_error == 0)
201                 iobuf->dr_error = error;
202
203         /*
204          * set dr_elapsed before dr_numreqs drops to 0, otherwise
205          * it's possible that the service thread will see dr_numreqs
206          * is zero but dr_elapsed is not set yet, leading to lost
207          * data in this processing and an assertion in a subsequent
208          * call to OSD.
209          */
210         if (atomic_read(&iobuf->dr_numreqs) == 1) {
211                 ktime_t now = ktime_get();
212
213                 iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
214                 iobuf->dr_elapsed_valid = 1;
215         }
216         if (atomic_dec_and_test(&iobuf->dr_numreqs))
217                 wake_up(&iobuf->dr_wait);
218
219         /* Completed bios used to be chained off iobuf->dr_bios and freed in
220          * filter_clear_dreq().  It was then possible to exhaust the biovec-256
221          * mempool when serious on-disk fragmentation was encountered,
222          * deadlocking the OST.  The bios are now released as soon as complete
223          * so the pool cannot be exhausted while IOs are competing. b=10076
224          */
225         bio_put(bio);
226 }
227
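/*
 * Account a bio that is about to be submitted: bump the per-device
 * in-flight counter for this direction (dr_rw 0 = read, 1 = write) and
 * update the RPC concurrency and disk I/O size histograms.
 */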
228 static void record_start_io(struct osd_iobuf *iobuf, int size)
229 {
230         struct osd_device    *osd = iobuf->dr_dev;
231         struct obd_histogram *h = osd->od_brw_stats.hist;
232
233         iobuf->dr_frags++;
234         atomic_inc(&iobuf->dr_numreqs);
235
236         if (iobuf->dr_rw == 0) {
237                 atomic_inc(&osd->od_r_in_flight);
238                 lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
239                                  atomic_read(&osd->od_r_in_flight));
240                 lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
241         } else if (iobuf->dr_rw == 1) {
242                 atomic_inc(&osd->od_w_in_flight);
243                 lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
244                                  atomic_read(&osd->od_w_in_flight));
245                 lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
246         } else {
247                 LBUG();
248         }
249 }
250
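/*
 * Compatibility wrapper around submit_bio(): older kernels take the
 * read/write flag as a separate argument, newer ones expect it to be
 * set in bio->bi_opf.
 */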
251 static void osd_submit_bio(int rw, struct bio *bio)
252 {
253         LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
254 #ifdef HAVE_SUBMIT_BIO_2ARGS
255         submit_bio(rw ? WRITE : READ, bio);
256 #else
257         bio->bi_opf |= rw;
258         submit_bio(bio);
259 #endif
260 }
261
262 static int can_be_merged(struct bio *bio, sector_t sector)
263 {
264         if (bio == NULL)
265                 return 0;
266
267         return bio_end_sector(bio) == sector ? 1 : 0;
268 }
269
270 #if IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)
271 /*
272  * This function changes the data being written, so it should only be
273  * used when testing the data integrity feature.
274  */
275 static void bio_integrity_fault_inject(struct bio *bio)
276 {
277         struct bio_vec *bvec;
278         DECLARE_BVEC_ITER_ALL(iter_all);
279         void *kaddr;
280         char *addr;
281
282         bio_for_each_segment_all(bvec, bio, iter_all) {
283                 struct page *page = bvec->bv_page;
284
285                 kaddr = kmap(page);
286                 addr = kaddr;
287                 *addr = ~(*addr);
288                 kunmap(page);
289                 break;
290         }
291 }
292
293 static int bio_dif_compare(__u16 *expected_guard_buf, void *bio_prot_buf,
294                            unsigned int sectors, int tuple_size)
295 {
296         __u16 *expected_guard;
297         __u16 *bio_guard;
298         int i;
299
300         expected_guard = expected_guard_buf;
301         for (i = 0; i < sectors; i++) {
302                 bio_guard = (__u16 *)bio_prot_buf;
303                 if (*bio_guard != *expected_guard) {
304                         CERROR(
305                                "unexpected guard tags on sector %d expected guard %u, bio guard %u, sectors %u, tuple size %d\n",
306                                i, *expected_guard, *bio_guard, sectors,
307                                tuple_size);
308                         return -EIO;
309                 }
310                 expected_guard++;
311                 bio_prot_buf += tuple_size;
312         }
313         return 0;
314 }
315
316 static int osd_bio_integrity_compare(struct bio *bio, struct block_device *bdev,
317                                      struct osd_iobuf *iobuf, int index)
318 {
319         struct blk_integrity *bi = bdev_get_integrity(bdev);
320         struct bio_integrity_payload *bip = bio->bi_integrity;
321         struct niobuf_local *lnb;
322         unsigned short sector_size = blk_integrity_interval(bi);
323         void *bio_prot_buf = page_address(bip->bip_vec->bv_page) +
324                 bip->bip_vec->bv_offset;
325         struct bio_vec *bv;
326         sector_t sector = bio_start_sector(bio);
327         unsigned int sectors, total;
328         DECLARE_BVEC_ITER_ALL(iter_all);
329         __u16 *expected_guard;
330         int rc;
331
332         total = 0;
333         bio_for_each_segment_all(bv, bio, iter_all) {
334                 lnb = iobuf->dr_lnbs[index];
335                 expected_guard = lnb->lnb_guards;
336                 sectors = bv->bv_len / sector_size;
337                 if (lnb->lnb_guard_rpc) {
338                         rc = bio_dif_compare(expected_guard, bio_prot_buf,
339                                              sectors, bi->tuple_size);
340                         if (rc)
341                                 return rc;
342                 }
343
344                 sector += sectors;
345                 bio_prot_buf += sectors * bi->tuple_size;
346                 total += sectors * bi->tuple_size;
347                 LASSERT(total <= bip_size(bio->bi_integrity));
348                 index++;
349         }
350         return 0;
351 }
352
353 static int osd_bio_integrity_handle(struct osd_device *osd, struct bio *bio,
354                                     struct osd_iobuf *iobuf,
355                                     int start_page_idx, bool fault_inject,
356                                     bool integrity_enabled)
357 {
358         struct super_block *sb = osd_sb(osd);
359         integrity_gen_fn *generate_fn = NULL;
360         integrity_vrfy_fn *verify_fn = NULL;
361         int rc;
362
363         ENTRY;
364
365         if (!integrity_enabled)
366                 RETURN(0);
367
368         rc = osd_get_integrity_profile(osd, &generate_fn, &verify_fn);
369         if (rc)
370                 RETURN(rc);
371
372         rc = bio_integrity_prep_fn(bio, generate_fn, verify_fn);
373         if (rc)
374                 RETURN(rc);
375
376         /* Verify and inject fault only when writing */
377         if (iobuf->dr_rw == 1) {
378                 if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_CMP))) {
379                         rc = osd_bio_integrity_compare(bio, sb->s_bdev, iobuf,
380                                                        start_page_idx);
381                         if (rc)
382                                 RETURN(rc);
383                 }
384
385                 if (unlikely(fault_inject))
386                         bio_integrity_fault_inject(bio);
387         }
388
389         RETURN(0);
390 }
391
392 #ifdef HAVE_BIO_INTEGRITY_PREP_FN
393 #  ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
394 static void dio_integrity_complete_routine(struct bio *bio)
395 #  else
396 static void dio_integrity_complete_routine(struct bio *bio, int error)
397 #  endif
398 {
399         struct osd_bio_private *bio_private = bio->bi_private;
400
401         bio->bi_private = bio_private->obp_iobuf;
402         osd_dio_complete_routine(bio, error);
403
404         OBD_FREE_PTR(bio_private);
405 }
406 #endif /* HAVE_BIO_INTEGRITY_PREP_FN */
407 #else  /* !CONFIG_BLK_DEV_INTEGRITY */
408 #define osd_bio_integrity_handle(osd, bio, iobuf, start_page_idx, \
409                                  fault_inject, integrity_enabled) 0
410 #endif /* CONFIG_BLK_DEV_INTEGRITY */
411
412 static int osd_bio_init(struct bio *bio, struct osd_iobuf *iobuf,
413                         bool integrity_enabled, int start_page_idx,
414                         struct osd_bio_private **pprivate)
415 {
416         ENTRY;
417
418         *pprivate = NULL;
419
420 #ifdef HAVE_BIO_INTEGRITY_PREP_FN
421         if (integrity_enabled) {
422                 struct osd_bio_private *bio_private = NULL;
423
424                 OBD_ALLOC_GFP(bio_private, sizeof(*bio_private), GFP_NOIO);
425                 if (bio_private == NULL)
426                         RETURN(-ENOMEM);
427                 bio->bi_end_io = dio_integrity_complete_routine;
428                 bio->bi_private = bio_private;
429                 bio_private->obp_start_page_idx = start_page_idx;
430                 bio_private->obp_iobuf = iobuf;
431                 *pprivate = bio_private;
432         } else
433 #endif
434         {
435                 bio->bi_end_io = dio_complete_routine;
436                 bio->bi_private = iobuf;
437         }
438
439         RETURN(0);
440 }
441
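/*
 * Mark the niobufs whose pages were fully covered by this I/O with
 * OBD_BRW_DONE, so that a later osd_declare_write_commit() can treat
 * them as already mapped and skip declaring block allocation again.
 */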
442 static void osd_mark_page_io_done(struct osd_iobuf *iobuf,
443                                   struct inode *inode,
444                                   sector_t start_blocks,
445                                   sector_t count)
446 {
447         struct niobuf_local *lnb;
448         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
449         pgoff_t pg_start, pg_end;
450
451         pg_start = start_blocks / blocks_per_page;
452         if (start_blocks % blocks_per_page)
453                 pg_start++;
454         if (count >= blocks_per_page)
455                 pg_end = (start_blocks + count -
456                           blocks_per_page) / blocks_per_page;
457         else
458                 return; /* nothing to mark */
459         for ( ; pg_start <= pg_end; pg_start++) {
460                 lnb = iobuf->dr_lnbs[pg_start];
461                 lnb->lnb_flags |= OBD_BRW_DONE;
462         }
463 }
464
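/*
 * Build and submit bios for the blocks described in the iobuf, merging
 * physically contiguous blocks into as few bios as possible.  Holes
 * (block number 0) are zero-filled in the page instead of being read.
 * Reads are waited for here; write completion is waited for in
 * osd_trans_stop().
 */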
465 static int osd_do_bio(struct osd_device *osd, struct inode *inode,
466                       struct osd_iobuf *iobuf, sector_t start_blocks,
467                       sector_t count)
468 {
469         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
470         struct page **pages = iobuf->dr_pages;
471         int npages = iobuf->dr_npages;
472         sector_t *blocks = iobuf->dr_blocks;
473         struct super_block *sb = inode->i_sb;
474         int sector_bits = sb->s_blocksize_bits - 9;
475         unsigned int blocksize = sb->s_blocksize;
476         struct block_device *bdev = sb->s_bdev;
477         struct osd_bio_private *bio_private = NULL;
478         struct bio *bio = NULL;
479         int bio_start_page_idx;
480         struct page *page;
481         unsigned int page_offset;
482         sector_t sector;
483         int nblocks;
484         int block_idx, block_idx_end;
485         int page_idx, page_idx_start;
486         int i;
487         int rc = 0;
488         bool fault_inject;
489         bool integrity_enabled;
490         struct blk_plug plug;
491         int blocks_left_page;
492
493         ENTRY;
494
495         fault_inject = OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
496         LASSERT(iobuf->dr_npages == npages);
497
498         integrity_enabled = bdev_integrity_enabled(bdev, iobuf->dr_rw);
499
500         osd_brw_stats_update(osd, iobuf);
501         iobuf->dr_start_time = ktime_get();
502
503         if (!count)
504                 count = npages * blocks_per_page;
505         block_idx_end = start_blocks + count;
506
507         blk_start_plug(&plug);
508
509         page_idx_start = start_blocks / blocks_per_page;
510         for (page_idx = page_idx_start, block_idx = start_blocks;
511              block_idx < block_idx_end; page_idx++,
512              block_idx += blocks_left_page) {
513                 page = pages[page_idx];
514                 LASSERT(page_idx < iobuf->dr_npages);
515
516                 i = block_idx % blocks_per_page;
517                 blocks_left_page = blocks_per_page - i;
518                 for (page_offset = i * blocksize; i < blocks_left_page;
519                      i += nblocks, page_offset += blocksize * nblocks) {
520                         nblocks = 1;
521
522                         if (blocks[block_idx + i] == 0) {  /* hole */
523                                 LASSERTF(iobuf->dr_rw == 0,
524                                          "page_idx %u, block_idx %u, i %u,"
525                                          "start_blocks: %llu, count: %llu, npages: %d\n",
526                                          page_idx, block_idx, i,
527                                          (unsigned long long)start_blocks,
528                                          (unsigned long long)count, npages);
529                                 memset(kmap(page) + page_offset, 0, blocksize);
530                                 kunmap(page);
531                                 continue;
532                         }
533
534                         sector = (sector_t)blocks[block_idx + i] << sector_bits;
535
536                         /* Additional contiguous file blocks? */
537                         while (i + nblocks < blocks_left_page &&
538                                (sector + (nblocks << sector_bits)) ==
539                                ((sector_t)blocks[block_idx + i + nblocks] <<
540                                  sector_bits))
541                                 nblocks++;
542
543                         if (bio && can_be_merged(bio, sector) &&
544                             bio_add_page(bio, page, blocksize * nblocks,
545                                          page_offset) != 0)
546                                 continue;       /* added this frag OK */
547
548                         if (bio != NULL) {
549                                 struct request_queue *q = bio_get_queue(bio);
550                                 unsigned int bi_size = bio_sectors(bio) << 9;
551
552                                 /* Dang! I have to fragment this I/O */
553                                 CDEBUG(D_INODE,
554                                        "bio++ sz %d vcnt %d(%d) sectors %d(%d) psg %d(%d)\n",
555                                        bi_size, bio->bi_vcnt, bio->bi_max_vecs,
556                                        bio_sectors(bio),
557                                        queue_max_sectors(q),
558                                        osd_bio_nr_segs(bio),
559                                        queue_max_segments(q));
560                                 rc = osd_bio_integrity_handle(osd, bio,
561                                         iobuf, bio_start_page_idx,
562                                         fault_inject, integrity_enabled);
563                                 if (rc) {
564                                         bio_put(bio);
565                                         goto out;
566                                 }
567
568                                 record_start_io(iobuf, bi_size);
569                                 osd_submit_bio(iobuf->dr_rw, bio);
570                         }
571
572                         bio_start_page_idx = page_idx;
573                         /* allocate new bio */
574                         bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
575                                         (block_idx_end - block_idx +
576                                          blocks_left_page - 1)));
577                         if (bio == NULL) {
578                                 CERROR("Can't allocate bio %u pages\n",
579                                        block_idx_end - block_idx +
580                                        blocks_left_page - 1);
581                                 rc = -ENOMEM;
582                                 goto out;
583                         }
584
585                         bio_set_dev(bio, bdev);
586                         bio_set_sector(bio, sector);
587                         bio->bi_opf = iobuf->dr_rw ? WRITE : READ;
588                         rc = osd_bio_init(bio, iobuf, integrity_enabled,
589                                           bio_start_page_idx, &bio_private);
590                         if (rc) {
591                                 bio_put(bio);
592                                 goto out;
593                         }
594
595                         rc = bio_add_page(bio, page,
596                                           blocksize * nblocks, page_offset);
597                         LASSERT(rc != 0);
598                 }
599         }
600
601         if (bio != NULL) {
602                 rc = osd_bio_integrity_handle(osd, bio, iobuf,
603                                               bio_start_page_idx,
604                                               fault_inject,
605                                               integrity_enabled);
606                 if (rc) {
607                         bio_put(bio);
608                         goto out;
609                 }
610
611                 record_start_io(iobuf, bio_sectors(bio) << 9);
612                 osd_submit_bio(iobuf->dr_rw, bio);
613                 rc = 0;
614         }
615
616 out:
617         blk_finish_plug(&plug);
618
619         /* in order to achieve better IO throughput, we don't wait for write
620          * completion here. instead we proceed with the transaction commit in
621          * parallel and wait for IO completion once the transaction is stopped;
622          * see osd_trans_stop() for more details -bzzz
623          */
624         if (iobuf->dr_rw == 0 || fault_inject) {
625                 wait_event(iobuf->dr_wait,
626                            atomic_read(&iobuf->dr_numreqs) == 0);
627                 osd_fini_iobuf(osd, iobuf);
628         }
629
630         if (rc == 0) {
631                 rc = iobuf->dr_error;
632         } else {
633                 if (bio_private)
634                         OBD_FREE_PTR(bio_private);
635         }
636
637         /* mark pages done only for writes */
638         if (rc == 0 && iobuf->dr_rw)
639                 osd_mark_page_io_done(iobuf, inode,
640                                       start_blocks, count);
641
642         RETURN(rc);
643 }
644
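/*
 * Split the byte range [offset, offset + len) into page-sized
 * niobuf_local entries.  Returns 0 with the entry count in *nrpages,
 * or -EOVERFLOW if more than maxlnb entries would be needed.
 */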
645 static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
646                                    struct niobuf_local *lnb, int maxlnb)
647 {
648         int rc = 0;
649         ENTRY;
650
651         *nrpages = 0;
652
653         while (len > 0) {
654                 int poff = offset & (PAGE_SIZE - 1);
655                 int plen = PAGE_SIZE - poff;
656
657                 if (*nrpages >= maxlnb) {
658                         rc = -EOVERFLOW;
659                         break;
660                 }
661
662                 if (plen > len)
663                         plen = len;
664                 lnb->lnb_file_offset = offset;
665                 lnb->lnb_page_offset = poff;
666                 lnb->lnb_len = plen;
667                 /* lnb->lnb_flags = rnb->rnb_flags; */
668                 lnb->lnb_flags = 0;
669                 lnb->lnb_page = NULL;
670                 lnb->lnb_rc = 0;
671                 lnb->lnb_guard_rpc = 0;
672                 lnb->lnb_guard_disk = 0;
673                 lnb->lnb_locked = 0;
674
675                 LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
676                          (long long) len);
677                 offset += plen;
678                 len -= plen;
679                 lnb++;
680                 (*nrpages)++;
681         }
682
683         RETURN(rc);
684 }
685
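/*
 * Return a locked page for the given file offset.  With caching the
 * page comes from (or is inserted into) the inode's page cache;
 * otherwise a private page from the per-thread dio array is used and
 * tagged with PagePrivate2 so osd_bufs_put() can tell the two cases
 * apart.
 */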
686 static struct page *osd_get_page(const struct lu_env *env, struct dt_object *dt,
687                                  loff_t offset, gfp_t gfp_mask, bool cache)
688 {
689         struct osd_thread_info *oti = osd_oti_get(env);
690         struct inode *inode = osd_dt_obj(dt)->oo_inode;
691         struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
692         struct page *page;
693         int cur;
694
695         LASSERT(inode);
696
697         if (cache) {
698                 page = find_or_create_page(inode->i_mapping,
699                                            offset >> PAGE_SHIFT, gfp_mask);
700
701                 if (likely(page)) {
702                         LASSERT(!PagePrivate2(page));
703                         wait_on_page_writeback(page);
704                 } else {
705                         lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
706                 }
707
708                 return page;
709         }
710
711         if (inode->i_mapping->nrpages) {
712                 /* consult with pagecache, but do not create new pages */
713                 /* this is normally used once */
714                 page = find_lock_page(inode->i_mapping, offset >> PAGE_SHIFT);
715                 if (page) {
716                         wait_on_page_writeback(page);
717                         return page;
718                 }
719         }
720
721         LASSERT(oti->oti_dio_pages);
722         cur = oti->oti_dio_pages_used;
723         page = oti->oti_dio_pages[cur];
724
725         if (unlikely(!page)) {
726                 LASSERT(cur < PTLRPC_MAX_BRW_PAGES);
727                 page = alloc_page(gfp_mask);
728                 if (!page)
729                         return NULL;
730                 oti->oti_dio_pages[cur] = page;
731                 SetPagePrivate2(page);
732                 lock_page(page);
733         }
734
735         ClearPageUptodate(page);
736         page->index = offset >> PAGE_SHIFT;
737         oti->oti_dio_pages_used++;
738
739         return page;
740 }
741
742 /*
743  * the following "locks" are involved:
744  * journal_start
745  * i_mutex
746  * page lock
747  *
748  * osd write path:
749  *  - lock page(s)
750  *  - journal_start
751  *  - truncate_sem
752  *
753  * ext4 vmtruncate:
754  *  - lock pages, unlock
755  *  - journal_start
756  *  - lock partial page
757  *  - i_data_sem
758  *
759  */
760
761 /**
762  * Unlock and release pages loaded by osd_bufs_get()
763  *
764  * Unlock \a npages pages from \a lnb and drop the refcount on them.
765  *
766  * \param env           thread execution environment
767  * \param dt            dt object undergoing IO (OSD object + methods)
768  * \param lnb           array of pages undergoing IO
769  * \param npages        number of pages in \a lnb
770  *
771  * \retval 0            always
772  */
773 static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
774                         struct niobuf_local *lnb, int npages)
775 {
776         struct osd_thread_info *oti = osd_oti_get(env);
777         struct pagevec pvec;
778         int i;
779
780         ll_pagevec_init(&pvec, 0);
781
782         for (i = 0; i < npages; i++) {
783                 struct page *page = lnb[i].lnb_page;
784
785                 if (page == NULL)
786                         continue;
787
788                 /* if the page isn't cached, then reset uptodate
789                  * to prevent reuse
790                  */
791                 if (PagePrivate2(page)) {
792                         oti->oti_dio_pages_used--;
793                 } else {
794                         if (lnb[i].lnb_locked)
795                                 unlock_page(page);
796                         if (pagevec_add(&pvec, page) == 0)
797                                 pagevec_release(&pvec);
798                 }
799
800                 lnb[i].lnb_page = NULL;
801         }
802
803         LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);
804
805         /* Release any partial pagevec */
806         pagevec_release(&pvec);
807
808         RETURN(0);
809 }
810
811 /**
812  * Load and lock pages undergoing IO
813  *
814  * Pages as described in the \a lnb array are fetched (from disk or cache)
815  * and locked for IO by the caller.
816  *
817  * DLM locking protects us from write and truncate competing for same region,
818  * but partial-page truncate can leave dirty pages in the cache for ldiskfs.
819  * It's possible the writeout on such a page is in progress when we access
820  * it. It's also possible that during this writeout we put new (partial) data
821  * into the page, but won't be able to proceed in filter_commitrw_write().
822  * Therefore, just wait for writeout completion as it should be rare enough.
823  *
824  * \param env           thread execution environment
825  * \param dt            dt object undergoing IO (OSD object + methods)
826  * \param pos           byte offset of IO start
827  * \param len           number of bytes of IO
828  * \param lnb           array of extents undergoing IO
829  * \param rw            read or write operation, and other flags
830  * \param capa          capabilities
831  *
832  * \retval pages        (zero or more) loaded successfully
833  * \retval -ENOMEM      on memory/page allocation error
834  */
835 static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
836                         loff_t pos, ssize_t len, struct niobuf_local *lnb,
837                         int maxlnb, enum dt_bufs_type rw)
838 {
839         struct osd_thread_info *oti = osd_oti_get(env);
840         struct osd_object *obj = osd_dt_obj(dt);
841         struct osd_device *osd   = osd_obj2dev(obj);
842         int npages, i, iosize, rc = 0;
843         bool cache, write;
844         loff_t fsize;
845         gfp_t gfp_mask;
846
847         LASSERT(obj->oo_inode);
848
849         rc = osd_map_remote_to_local(pos, len, &npages, lnb, maxlnb);
850         if (rc)
851                 RETURN(rc);
852
853         write = rw & DT_BUFS_TYPE_WRITE;
854
855         fsize = lnb[npages - 1].lnb_file_offset + lnb[npages - 1].lnb_len;
856         iosize = fsize - lnb[0].lnb_file_offset;
857         fsize = max(fsize, i_size_read(obj->oo_inode));
858
859         cache = rw & DT_BUFS_TYPE_READAHEAD;
860         if (cache)
861                 goto bypass_checks;
862
863         cache = osd_use_page_cache(osd);
864         while (cache) {
865                 if (write) {
866                         if (!osd->od_writethrough_cache) {
867                                 cache = false;
868                                 break;
869                         }
870                         if (iosize > osd->od_writethrough_max_iosize) {
871                                 cache = false;
872                                 break;
873                         }
874                 } else {
875                         if (!osd->od_read_cache) {
876                                 cache = false;
877                                 break;
878                         }
879                         if (iosize > osd->od_readcache_max_iosize) {
880                                 cache = false;
881                                 break;
882                         }
883                 }
884                 /* don't use cache on large files */
885                 if (osd->od_readcache_max_filesize &&
886                     fsize > osd->od_readcache_max_filesize)
887                         cache = false;
888                 break;
889         }
890
891 bypass_checks:
892         if (!cache && unlikely(!oti->oti_dio_pages)) {
893                 OBD_ALLOC_PTR_ARRAY_LARGE(oti->oti_dio_pages,
894                                           PTLRPC_MAX_BRW_PAGES);
895                 if (!oti->oti_dio_pages)
896                         return -ENOMEM;
897         }
898
899         /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
900         gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
901                                              GFP_HIGHUSER;
902         for (i = 0; i < npages; i++, lnb++) {
903                 lnb->lnb_page = osd_get_page(env, dt, lnb->lnb_file_offset,
904                                              gfp_mask, cache);
905                 if (lnb->lnb_page == NULL)
906                         GOTO(cleanup, rc = -ENOMEM);
907
908                 lnb->lnb_locked = 1;
909         }
910
911 #if 0
912         /* XXX: this version doesn't invalidate cached pages, but use them */
913         if (!cache && write && obj->oo_inode->i_mapping->nrpages) {
914                 /* do not allow data aliasing, invalidate pagecache */
915                 /* XXX: can be quite expensive in mixed case */
916                 invalidate_mapping_pages(obj->oo_inode->i_mapping,
917                                 lnb[0].lnb_file_offset >> PAGE_SHIFT,
918                                 lnb[npages - 1].lnb_file_offset >> PAGE_SHIFT);
919         }
920 #endif
921
922         RETURN(i);
923
924 cleanup:
925         if (i > 0)
926                 osd_bufs_put(env, dt, lnb - i, i);
927         return rc;
928 }
929 /* Borrow @ext4_chunk_trans_blocks */
930 static int osd_chunk_trans_blocks(struct inode *inode, int nrblocks)
931 {
932         ldiskfs_group_t groups;
933         int gdpblocks;
934         int idxblocks;
935         int depth;
936         int ret;
937
938         depth = ext_depth(inode);
939         idxblocks = depth * 2;
940
941         /*
942          * Now let's see how many group bitmaps and group descriptors need
943          * to be accounted for.
944          */
945         groups = idxblocks + 1;
946         gdpblocks = groups;
947         if (groups > LDISKFS_SB(inode->i_sb)->s_groups_count)
948                 groups = LDISKFS_SB(inode->i_sb)->s_groups_count;
949         if (gdpblocks > LDISKFS_SB(inode->i_sb)->s_gdb_count)
950                 gdpblocks = LDISKFS_SB(inode->i_sb)->s_gdb_count;
951
952         /* bitmaps and block group descriptor blocks */
953         ret = idxblocks + groups + gdpblocks;
954
955         /* Blocks for super block, inode, quota and xattr blocks */
956         ret += LDISKFS_META_TRANS_BLOCKS(inode->i_sb);
957
958         return ret;
959 }
960
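/*
 * Make sure the running journal handle has at least @needed credits,
 * extending or restarting the transaction if necessary.  Returns 0 on
 * success (including the case where the journal was restarted) or a
 * negative error code.
 */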
961 #ifdef HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS
962 static int osd_extend_restart_trans(handle_t *handle, int needed,
963                                     struct inode *inode)
964 {
965         int rc;
966
967         rc = ldiskfs_journal_ensure_credits(handle, needed,
968                 ldiskfs_trans_default_revoke_credits(inode->i_sb));
969         /* a positive return code means the journal has been restarted */
970         if (rc > 0)
971                 rc = 0;
972
973         return rc;
974 }
975 #else
976 static int osd_extend_restart_trans(handle_t *handle, int needed,
977                                     struct inode *inode)
978 {
979         int rc;
980
981         if (ldiskfs_handle_has_enough_credits(handle, needed))
982                 return 0;
983         rc = ldiskfs_journal_extend(handle,
984                                 needed - handle->h_buffer_credits);
985         if (rc <= 0)
986                 return rc;
987
988         return ldiskfs_journal_restart(handle, needed);
989 }
990 #endif /* HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS */
991
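/*
 * Grow i_size/i_disksize if this write extends the file (capped by the
 * client-provided user_size), then submit the already-mapped blocks
 * through osd_do_bio().
 */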
992 static int osd_ldiskfs_map_write(struct inode *inode, struct osd_iobuf *iobuf,
993                                  struct osd_device *osd, sector_t start_blocks,
994                                  sector_t count, loff_t *disk_size,
995                                  __u64 user_size)
996 {
997         /* if file has grown, take user_size into account */
998         if (user_size && *disk_size > user_size)
999                 *disk_size = user_size;
1000
1001         spin_lock(&inode->i_lock);
1002         if (*disk_size > i_size_read(inode)) {
1003                 i_size_write(inode, *disk_size);
1004                 LDISKFS_I(inode)->i_disksize = *disk_size;
1005                 spin_unlock(&inode->i_lock);
1006                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
1007         } else {
1008                 spin_unlock(&inode->i_lock);
1009         }
1010
1011         /*
1012          * We don't do stats here as in the read path because
1013          * the write is async: we'll do this in osd_put_bufs()
1014          */
1015         return osd_do_bio(osd, inode, iobuf, start_blocks, count);
1016 }
1017
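/*
 * Return the per-CPU decaying estimate of how many contiguous bytes an
 * extent allocation is expected to yield, initialized on first use to
 * the filesystem block size (no extents feature) or
 * OSD_DEFAULT_EXTENT_BYTES.
 */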
1018 static unsigned int osd_extent_bytes(const struct osd_device *o)
1019 {
1020         unsigned int *extent_bytes_ptr =
1021                         raw_cpu_ptr(o->od_extent_bytes_percpu);
1022
1023         if (likely(*extent_bytes_ptr))
1024                 return *extent_bytes_ptr;
1025
1026         /* initialize on first access or CPU hotplug */
1027         if (!ldiskfs_has_feature_extents(osd_sb(o)))
1028                 *extent_bytes_ptr = 1 << osd_sb(o)->s_blocksize_bits;
1029         else
1030                 *extent_bytes_ptr = OSD_DEFAULT_EXTENT_BYTES;
1031
1032         return *extent_bytes_ptr;
1033 }
1034
1035 #define EXTENT_BYTES_DECAY 64
1036 static void osd_decay_extent_bytes(struct osd_device *osd,
1037                                    unsigned int new_bytes)
1038 {
1039         unsigned int old_bytes;
1040
1041         if (!ldiskfs_has_feature_extents(osd_sb(osd)))
1042                 return;
1043
1044         old_bytes = osd_extent_bytes(osd);
1045         *raw_cpu_ptr(osd->od_extent_bytes_percpu) =
1046                 (old_bytes * (EXTENT_BYTES_DECAY - 1) +
1047                  min(new_bytes, OSD_DEFAULT_EXTENT_BYTES) +
1048                  EXTENT_BYTES_DECAY - 1) / EXTENT_BYTES_DECAY;
1049 }
1050
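/*
 * Map (and for writes, allocate) disk blocks for the pages in the
 * iobuf using ldiskfs_map_blocks().  Contiguous pages are mapped
 * together; when the declared journal credits are used up, the blocks
 * mapped so far are written out via osd_ldiskfs_map_write() and
 * -EAGAIN is returned with th_restart_tran set so the caller can
 * restart the transaction.
 */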
1051 static int osd_ldiskfs_map_inode_pages(struct inode *inode,
1052                                        struct osd_iobuf *iobuf,
1053                                        struct osd_device *osd,
1054                                        int create, __u64 user_size,
1055                                        int check_credits,
1056                                        struct thandle *thandle)
1057 {
1058         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
1059         int rc = 0, i = 0, mapped_index = 0;
1060         struct page *fp = NULL;
1061         int clen = 0;
1062         pgoff_t max_page_index;
1063         handle_t *handle = NULL;
1064         sector_t start_blocks = 0, count = 0;
1065         loff_t disk_size = 0;
1066         struct page **page = iobuf->dr_pages;
1067         int pages = iobuf->dr_npages;
1068         sector_t *blocks = iobuf->dr_blocks;
1069         struct niobuf_local *lnb1, *lnb2;
1070         loff_t size1, size2;
1071
1072         max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
1073
1074         CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
1075                 inode->i_ino, pages, (*page)->index);
1076
1077         if (create) {
1078                 create = LDISKFS_GET_BLOCKS_CREATE;
1079                 handle = ldiskfs_journal_current_handle();
1080                 LASSERT(handle != NULL);
1081                 rc = osd_attach_jinode(inode);
1082                 if (rc)
1083                         return rc;
1084                 disk_size = i_size_read(inode);
1085                 /* if disk_size is already bigger than specified user_size,
1086                  * ignore user_size
1087                  */
1088                 if (disk_size > user_size)
1089                         user_size = 0;
1090         }
1091         /* pages are already sorted, so we just have to find
1092          * contiguous ranges and process them properly
1093          */
1094         while (i < pages) {
1095                 long blen, total = 0, previous_total = 0;
1096                 struct ldiskfs_map_blocks map = { 0 };
1097
1098                 if (fp == NULL) { /* start new extent */
1099                         fp = *page++;
1100                         clen = 1;
1101                         if (++i != pages)
1102                                 continue;
1103                 } else if (fp->index + clen == (*page)->index) {
1104                         /* continue the extent */
1105                         page++;
1106                         clen++;
1107                         if (++i != pages)
1108                                 continue;
1109                 }
1110                 if (fp->index + clen >= max_page_index)
1111                         GOTO(cleanup, rc = -EFBIG);
1112                 /* process found extent */
1113                 map.m_lblk = fp->index * blocks_per_page;
1114                 map.m_len = blen = clen * blocks_per_page;
1115 cont_map:
1116                 /**
1117                  * We might restart the transaction for block allocations.
1118                  * To keep data=ordered semantics, the IO submission, disk
1119                  * size update and block allocations need to be within the
1120                  * same transaction to guarantee consistency.
1121                  */
1122                 if (handle && check_credits) {
1123                         struct osd_thandle *oh;
1124
1125                         LASSERT(thandle != NULL);
1126                         oh = container_of(thandle, struct osd_thandle,
1127                                           ot_super);
1128                         /*
1129                          * only issue IO if a transaction restart is needed,
1130                          * as updating the disk size requires the inode lock,
1131                          * which we want to avoid as much as possible.
1132                          */
1133                         if (oh->oh_declared_ext <= 0) {
1134                                 rc = osd_ldiskfs_map_write(inode,
1135                                         iobuf, osd, start_blocks,
1136                                         count, &disk_size, user_size);
1137                                 if (rc)
1138                                         GOTO(cleanup, rc);
1139                                 thandle->th_restart_tran = 1;
1140                                 GOTO(cleanup, rc = -EAGAIN);
1141                         }
1142
1143                         if (OBD_FAIL_CHECK(OBD_FAIL_OST_RESTART_IO))
1144                                 oh->oh_declared_ext = 0;
1145                         else
1146                                 oh->oh_declared_ext--;
1147                 }
1148                 rc = ldiskfs_map_blocks(handle, inode, &map, create);
1149                 if (rc >= 0) {
1150                         int c = 0;
1151
1152                         for (; total < blen && c < map.m_len; c++, total++) {
1153                                 if (rc == 0) {
1154                                         *(blocks + total) = 0;
1155                                         total++;
1156                                         break;
1157                                 }
1158                                 if ((map.m_flags & LDISKFS_MAP_UNWRITTEN) &&
1159                                     !create) {
1160                                         /* don't try to read allocated but
1161                                          * unwritten blocks; instead fill those
1162                                          * regions with zeros in osd_do_bio() */
1163                                         *(blocks + total) = 0;
1164                                         continue;
1165                                 }
1166                                 *(blocks + total) = map.m_pblk + c;
1167                                 /* unmap any possible underlying
1168                                  * metadata from the block device
1169                                  * mapping.  b=6998.
1170                                  */
1171                                 if ((map.m_flags & LDISKFS_MAP_NEW) &&
1172                                     create)
1173                                         clean_bdev_aliases(inode->i_sb->s_bdev,
1174                                                            map.m_pblk + c, 1);
1175                         }
1176                         rc = 0;
1177                 }
1178
1179                 if (rc == 0 && create) {
1180                         count += (total - previous_total);
1181                         mapped_index = (count + blocks_per_page -
1182                                         1) / blocks_per_page - 1;
1183                         lnb1 = iobuf->dr_lnbs[i - clen];
1184                         lnb2 = iobuf->dr_lnbs[mapped_index];
1185                         size1 = lnb1->lnb_file_offset -
1186                                 (lnb1->lnb_file_offset % PAGE_SIZE) +
1187                                 (total << inode->i_blkbits);
1188                         size2 = lnb2->lnb_file_offset + lnb2->lnb_len;
1189
1190                         if (size1 > size2)
1191                                 size1 = size2;
1192                         if (size1 > disk_size)
1193                                 disk_size = size1;
1194                 }
1195
1196                 if (rc == 0 && total < blen) {
1197                         /*
1198                          * decay the extent-bytes estimate if we could
1199                          * not allocate the whole extent at once.
1200                          */
1201                         osd_decay_extent_bytes(osd,
1202                                 (total - previous_total) << inode->i_blkbits);
1203                         map.m_lblk = fp->index * blocks_per_page + total;
1204                         map.m_len = blen - total;
1205                         previous_total = total;
1206                         goto cont_map;
1207                 }
1208                 if (rc != 0)
1209                         GOTO(cleanup, rc);
1210                 /*
1211                  * update the decaying average if we could allocate
1212                  * a good large (1M) extent at once.
1213                  */
1214                 if (previous_total == 0 &&
1215                     total >= OSD_DEFAULT_EXTENT_BYTES >> inode->i_blkbits)
1216                         osd_decay_extent_bytes(osd,
1217                                                total << inode->i_blkbits);
1218                 /* look for next extent */
1219                 fp = NULL;
1220                 blocks += blocks_per_page * clen;
1221         }
1222 cleanup:
1223         if (rc == 0 && create &&
1224             start_blocks < pages * blocks_per_page) {
1225                 rc = osd_ldiskfs_map_write(inode, iobuf, osd, start_blocks,
1226                                            count, &disk_size, user_size);
1227                 LASSERT(start_blocks + count == pages * blocks_per_page);
1228         }
1229         return rc;
1230 }
1231
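/*
 * Prepare partial-page writes: pages that lie within the current file
 * size are read in so the untouched part of the page is preserved,
 * while pages beyond EOF just have the regions outside the write
 * zero-filled.
 */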
1232 static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
1233                           struct niobuf_local *lnb, int npages)
1234 {
1235         struct osd_thread_info *oti   = osd_oti_get(env);
1236         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
1237         struct inode           *inode = osd_dt_obj(dt)->oo_inode;
1238         struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
1239         ktime_t start, end;
1240         s64 timediff;
1241         ssize_t isize;
1242         __s64  maxidx;
1243         int i, rc = 0;
1244
1245         LASSERT(inode);
1246
1247         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1248         if (unlikely(rc != 0))
1249                 RETURN(rc);
1250
1251         isize = i_size_read(inode);
1252         maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;
1253
1254         start = ktime_get();
1255         for (i = 0; i < npages; i++) {
1256
1257                 /*
1258                  * till commit the content of the page is undefined;
1259                  * we'll set it uptodate once the bulk is done, otherwise
1260                  * subsequent reads can access non-stable data
1261                  */
1262                 ClearPageUptodate(lnb[i].lnb_page);
1263
1264                 if (lnb[i].lnb_len == PAGE_SIZE)
1265                         continue;
1266
1267                 if (maxidx >= lnb[i].lnb_page->index) {
1268                         osd_iobuf_add_page(iobuf, &lnb[i]);
1269                 } else {
1270                         long off;
1271                         char *p = kmap(lnb[i].lnb_page);
1272
1273                         off = lnb[i].lnb_page_offset;
1274                         if (off)
1275                                 memset(p, 0, off);
1276                         off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
1277                               ~PAGE_MASK;
1278                         if (off)
1279                                 memset(p + off, 0, PAGE_SIZE - off);
1280                         kunmap(lnb[i].lnb_page);
1281                 }
1282         }
1283         end = ktime_get();
1284         timediff = ktime_us_delta(end, start);
1285         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1286
1287         if (iobuf->dr_npages) {
1288                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
1289                                                  0, 0, NULL);
1290                 if (likely(rc == 0)) {
1291                         rc = osd_do_bio(osd, inode, iobuf, 0, 0);
1292                         /* do IO stats for preparation reads */
1293                         osd_fini_iobuf(osd, iobuf);
1294                 }
1295         }
1296         RETURN(rc);
1297 }
1298
1299 struct osd_fextent {
1300         sector_t        start;
1301         sector_t        end;
1302         unsigned int    mapped:1;
1303 };
1304
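/*
 * Check whether the block backing @offset is already allocated, using
 * a single-extent FIEMAP lookup and caching the resulting extent (or
 * hole) in @cached_extent to avoid repeated lookups for neighbouring
 * offsets.
 */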
1305 static int osd_is_mapped(struct dt_object *dt, __u64 offset,
1306                          struct osd_fextent *cached_extent)
1307 {
1308         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1309         sector_t block = offset >> inode->i_blkbits;
1310         sector_t start;
1311         struct fiemap_extent_info fei = { 0 };
1312         struct fiemap_extent fe = { 0 };
1313         int rc;
1314
1315         if (block >= cached_extent->start && block < cached_extent->end)
1316                 return cached_extent->mapped;
1317
1318         if (i_size_read(inode) == 0)
1319                 return 0;
1320
1321         /* Beyond EOF, must not be mapped */
1322         if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
1323                 return 0;
1324
1325         fei.fi_extents_max = 1;
1326         fei.fi_extents_start = &fe;
1327
1328         rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
1329         if (rc != 0)
1330                 return 0;
1331
1332         start = fe.fe_logical >> inode->i_blkbits;
1333         if (fei.fi_extents_mapped == 0) {
1334                 /* a special case - no extent found at this offset and forward.
1335                  * we can consider this as a hole to EOF. it's safe to cache
1336                  * as other threads can not allocate/punch blocks this thread
1337                  * is working on (LDLM). */
1338                 cached_extent->start = block;
1339                 cached_extent->end = i_size_read(inode) >> inode->i_blkbits;
1340                 cached_extent->mapped = 0;
1341                 return 0;
1342         }
1343
1344         if (start > block) {
1345                 cached_extent->start = block;
1346                 cached_extent->end = start;
1347                 cached_extent->mapped = 0;
1348         } else {
1349                 cached_extent->start = start;
1350                 cached_extent->end = (fe.fe_logical + fe.fe_length) >>
1351                                       inode->i_blkbits;
1352                 cached_extent->mapped = 1;
1353         }
1354
1355         return cached_extent->mapped;
1356 }
1357
1358 static int osd_declare_write_commit(const struct lu_env *env,
1359                                     struct dt_object *dt,
1360                                     struct niobuf_local *lnb, int npages,
1361                                     struct thandle *handle)
1362 {
1363         const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1364         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1365         struct osd_thandle      *oh;
1366         int                     extents = 0;
1367         int                     depth;
1368         int                     i;
1369         int                     newblocks = 0;
1370         int                     rc = 0;
1371         int                     credits = 0;
1372         long long               quota_space = 0;
1373         struct osd_fextent      mapped = { 0 }, extent = { 0 };
1374         enum osd_quota_local_flags local_flags = 0;
1375         enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
1376         unsigned int            extent_bytes;
1377         ENTRY;
1378
1379         LASSERT(handle != NULL);
1380         oh = container_of(handle, struct osd_thandle, ot_super);
1381         LASSERT(oh->ot_handle == NULL);
1382
1383         /*
1384          * We track a decaying average of extent bytes per filesystem.
1385          * Most of the time it will be 1M; as the filesystem becomes
1386          * heavily fragmented it can be reduced to 4K at the worst.
1387          */
1388         extent_bytes = osd_extent_bytes(osd);
1389         LASSERT(extent_bytes >= osd_sb(osd)->s_blocksize);
1390
1391         /* calculate number of extents (probably better to pass nb) */
1392         for (i = 0; i < npages; i++) {
1393                 /* ignore quota for the whole request if any page is from
1394                  * the client cache or was written by root.
1395                  *
1396                  * XXX once we drop 1.8 client support, the check for
1397                  * whether a page is from the cache can be simplified to:
1398                  * !(lnb[i].flags & OBD_BRW_SYNC)
1399                  *
1400                  * XXX we could handle this on a per-lnb basis, as is done
1401                  * for grants.
1402                  */
1403                 if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
1404                     (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
1405                     OBD_BRW_FROM_GRANT)
1406                         declare_flags |= OSD_QID_FORCE;
1407
1408                 if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &mapped)) {
1409                         lnb[i].lnb_flags |= OBD_BRW_MAPPED;
1410                         continue;
1411                 }
1412
1413                 if (lnb[i].lnb_flags & OBD_BRW_DONE) {
1414                         lnb[i].lnb_flags |= OBD_BRW_MAPPED;
1415                         continue;
1416                 }
1417
1418                 /* count only unmapped changes */
1419                 newblocks++;
1420                 if (lnb[i].lnb_file_offset != extent.end || extent.end == 0) {
1421                         if (extent.end != 0)
1422                                 extents += (extent.end - extent.start +
1423                                         extent_bytes - 1) / extent_bytes;
1424                         extent.start = lnb[i].lnb_file_offset;
1425                         extent.end = lnb[i].lnb_file_offset + lnb[i].lnb_len;
1426                 } else {
1427                         extent.end += lnb[i].lnb_len;
1428                 }
1429
1430                 quota_space += PAGE_SIZE;
1431         }
1432
1433         credits++; /* inode */
1434         /*
1435          * pure overwrite case: no need to modify the tree
1436          * or allocate new blocks.
1437          */
1438         if (!newblocks)
1439                 goto out_declare;
1440
1441         extents += (extent.end - extent.start +
1442                     extent_bytes - 1) / extent_bytes;
1443         /*
1444          * each extent can go into a new leaf and cause a split.
1445          * 5 is the max tree depth: inode + 4 index blocks;
1446          * with blockmaps the depth is 3 at most
1447          */
1448         if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
1449                 /*
1450                  * many concurrent threads may grow the tree by the time
1451                  * our transaction starts, so consider 2 the minimum depth
1452                  */
1453                 depth = ext_depth(inode);
1454                 depth = max(depth, 1) + 1;
1455                 newblocks += depth;
1456                 credits += depth * 2 * extents;
1457         } else {
1458                 depth = 3;
1459                 newblocks += depth;
1460                 credits += depth * extents;
1461         }
1462
1463         oh->oh_declared_ext = extents;
1464
1465         /* quota space for metadata blocks */
1466         quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));
1467
1468         /* quota space should be reported in 1K blocks */
1469         quota_space = toqb(quota_space);
1470
1471         /* each new block can go into a different group (bitmap + gd) */
1472
1473         /* we can't dirty more bitmap blocks than exist */
1474         if (extents > LDISKFS_SB(osd_sb(osd))->s_groups_count)
1475                 credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
1476         else
1477                 credits += extents;
1478
1479         /* we can't dirty more gd blocks than exist */
1480         if (extents > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
1481                 credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
1482         else
1483                 credits += extents;
1484
1485 out_declare:
1486         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1487
1488         /* make sure the over quota flags were not set */
1489         lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;
1490
1491         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1492                                    i_projid_read(inode), quota_space, oh,
1493                                    osd_dt_obj(dt), &local_flags, declare_flags);
1494
1495         /* for now we only need to store the overquota flags in the first
1496          * lnb; once we support multi-object BRW, this code needs to be
1497          * revised.
1498          */
1499         if (local_flags & QUOTA_FL_OVER_USRQUOTA)
1500                 lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
1501         if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
1502                 lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
1503         if (local_flags & QUOTA_FL_OVER_PRJQUOTA)
1504                 lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;
1505
1506         if (rc == 0)
1507                 rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);
1508
1509         RETURN(rc);
1510 }
1511
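/*
 * Worked example (a sketch, assuming 4KB blocks, 4KB pages, an extent-based
 * inode with ext_depth() == 1 and extent_bytes == 1MB): a contiguous, fully
 * unmapped 1MB write (npages = 256) declared above yields
 *
 *	extents     = 1MB / 1MB = 1
 *	newblocks   = 256 + depth (2) = 258
 *	credits     = 1 (inode) + 2*2*1 (tree) + 1 (bitmap) + 1 (gd) = 7
 *	quota_space = toqb(256 * 4KB + 2 * 1 * 4KB) = 1032 (1K blocks)
 *
 * These numbers only illustrate the formulas; actual values depend on the
 * filesystem geometry and the decaying extent_bytes estimate.
 */
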
1512 /* commit pages to the transaction: map unallocated pages to blocks and write the prepared pages */
1513 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
1514                             struct niobuf_local *lnb, int npages,
1515                             struct thandle *thandle, __u64 user_size)
1516 {
1517         struct osd_thread_info *oti = osd_oti_get(env);
1518         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1519         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1520         struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
1521         int rc = 0, i, check_credits = 0;
1522
1523         LASSERT(inode);
1524
1525         rc = osd_init_iobuf(osd, iobuf, 1, npages);
1526         if (unlikely(rc != 0))
1527                 RETURN(rc);
1528
1529         dquot_initialize(inode);
1530
1531         for (i = 0; i < npages; i++) {
1532                 if (lnb[i].lnb_rc == -ENOSPC &&
1533                     (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
1534                         /* Allow the write to proceed if overwriting an
1535                          * existing block
1536                          */
1537                         lnb[i].lnb_rc = 0;
1538                 }
1539
1540                 if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
1541                         CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
1542                                lnb[i].lnb_rc);
1543                         LASSERT(lnb[i].lnb_page);
1544                         generic_error_remove_page(inode->i_mapping,
1545                                                   lnb[i].lnb_page);
1546                         continue;
1547                 }
1548
1549                 if (lnb[i].lnb_flags & OBD_BRW_DONE)
1550                         continue;
1551
1552                 if (!(lnb[i].lnb_flags & OBD_BRW_MAPPED))
1553                         check_credits = 1;
1554
1555                 LASSERT(PageLocked(lnb[i].lnb_page));
1556                 LASSERT(!PageWriteback(lnb[i].lnb_page));
1557
1558                 /*
1559                  * Since write and truncate are serialized by oo_sem, even
1560                  * partial-page truncate should not leave dirty pages in the
1561                  * page cache.
1562                  */
1563                 LASSERT(!PageDirty(lnb[i].lnb_page));
1564
1565                 SetPageUptodate(lnb[i].lnb_page);
1566
1567                 osd_iobuf_add_page(iobuf, &lnb[i]);
1568         }
1569
1570         osd_trans_exec_op(env, thandle, OSD_OT_WRITE);
1571
1572         if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
1573                 rc = -ENOSPC;
1574         } else if (iobuf->dr_npages > 0) {
1575                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd,
1576                                                  1, user_size,
1577                                                  check_credits,
1578                                                  thandle);
1579         } else {
1580                 /* no pages to write, no transno is needed */
1581                 thandle->th_local = 1;
1582         }
1583
1584         if (rc != 0 && !thandle->th_restart_tran)
1585                 osd_fini_iobuf(osd, iobuf);
1586
1587         osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
1588
1589         if (unlikely(rc != 0 && !thandle->th_restart_tran)) {
1590                 /* if write fails, we should drop pages from the cache */
1591                 for (i = 0; i < npages; i++) {
1592                         if (lnb[i].lnb_page == NULL)
1593                                 continue;
1594                         if (!PagePrivate2(lnb[i].lnb_page)) {
1595                                 LASSERT(PageLocked(lnb[i].lnb_page));
1596                                 generic_error_remove_page(inode->i_mapping,
1597                                                           lnb[i].lnb_page);
1598                         }
1599                 }
1600         }
1601
1602         RETURN(rc);
1603 }
1604
1605 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1606                          struct niobuf_local *lnb, int npages)
1607 {
1608         struct osd_thread_info *oti = osd_oti_get(env);
1609         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1610         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1611         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1612         int rc = 0, i, cache_hits = 0, cache_misses = 0;
1613         ktime_t start, end;
1614         s64 timediff;
1615         loff_t isize;
1616
1617         LASSERT(inode);
1618
1619         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1620         if (unlikely(rc != 0))
1621                 RETURN(rc);
1622
1623         isize = i_size_read(inode);
1624
1625         start = ktime_get();
1626         for (i = 0; i < npages; i++) {
1627
1628                 if (isize <= lnb[i].lnb_file_offset)
1629                         /* If there's no more data, abort early.
1630                          * lnb->lnb_rc == 0, so it's easy to detect later.
1631                          */
1632                         break;
1633
1634                 /* instead of checking whether we go beyond isize, always
1635                  * send complete pages
1636                  */
1637                 lnb[i].lnb_rc = lnb[i].lnb_len;
1638
1639                 /* Bypass disk read if fail_loc is set properly */
1640                 if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
1641                         SetPageUptodate(lnb[i].lnb_page);
1642
1643                 if (PageUptodate(lnb[i].lnb_page)) {
1644                         cache_hits++;
1645                         unlock_page(lnb[i].lnb_page);
1646                 } else {
1647                         cache_misses++;
1648                         osd_iobuf_add_page(iobuf, &lnb[i]);
1649                 }
1650                 /* no need to unlock in osd_bufs_put(): the sooner the page
1651                  * is unlocked, the earlier another client can access it.
1652                  * note the real unlock_page() can be called a few lines
1653                  * below, after osd_do_bio(). lnb is per-thread, so it's
1654                  * fine to have PG_locked and lnb_locked inconsistent here
1655                  */
1656                 lnb[i].lnb_locked = 0;
1657         }
1658         end = ktime_get();
1659         timediff = ktime_us_delta(end, start);
1660         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1661
1662         if (cache_hits != 0)
1663                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1664                                     cache_hits);
1665         if (cache_misses != 0)
1666                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1667                                     cache_misses);
1668         if (cache_hits + cache_misses != 0)
1669                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1670                                     cache_hits + cache_misses);
1671
1672         if (iobuf->dr_npages) {
1673                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
1674                                                  0, 0, NULL);
1675                 if (!rc)
1676                         rc = osd_do_bio(osd, inode, iobuf, 0, 0);
1677
1678                 /* IO stats will be done in osd_bufs_put() */
1679
1680                 /* early release to let others read data during the bulk */
1681                 for (i = 0; i < iobuf->dr_npages; i++) {
1682                         LASSERT(PageLocked(iobuf->dr_pages[i]));
1683                         if (!PagePrivate2(iobuf->dr_pages[i]))
1684                                 unlock_page(iobuf->dr_pages[i]);
1685                 }
1686         }
1687
1688         RETURN(rc);
1689 }
1690
1691 /*
1692  * XXX: Another layering violation for now.
1693  *
1694  * We don't want to use ->f_op->read/->write methods, because generic file I/O
1695  *
1696  *         - serializes on ->i_sem, and
1697  *
1698  *         - does a lot of extra work like balance_dirty_pages(),
1699  *
1700  * which doesn't work for globally shared files like /last_rcvd.
1701  */
1702 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1703 {
1704         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1705
1706         memcpy(buffer, (char *)ei->i_data, buflen);
1707
1708         return  buflen;
1709 }
1710
1711 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1712 {
1713         struct buffer_head *bh;
1714         unsigned long block;
1715         int osize;
1716         int blocksize;
1717         int csize;
1718         int boffs;
1719
1720         /* prevent reading past EOF */
1721         spin_lock(&inode->i_lock);
1722         if (i_size_read(inode) < *offs + size) {
1723                 loff_t diff = i_size_read(inode) - *offs;
1724
1725                 spin_unlock(&inode->i_lock);
1726                 if (diff < 0) {
1727                         CDEBUG(D_OTHER,
1728                                "size %llu is too short to read @%llu\n",
1729                                i_size_read(inode), *offs);
1730                         return -EBADR;
1731                 } else if (diff == 0) {
1732                         return 0;
1733                 } else {
1734                         size = diff;
1735                 }
1736         } else {
1737                 spin_unlock(&inode->i_lock);
1738         }
1739
1740         blocksize = 1 << inode->i_blkbits;
1741         osize = size;
1742         while (size > 0) {
1743                 block = *offs >> inode->i_blkbits;
1744                 boffs = *offs & (blocksize - 1);
1745                 csize = min(blocksize - boffs, size);
1746                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1747                 if (IS_ERR(bh)) {
1748                         CERROR("%s: can't read %u@%llu on ino %lu: rc = %ld\n",
1749                                osd_ino2name(inode), csize, *offs, inode->i_ino,
1750                                PTR_ERR(bh));
1751                         return PTR_ERR(bh);
1752                 }
1753
1754                 if (bh != NULL) {
1755                         memcpy(buf, bh->b_data + boffs, csize);
1756                         brelse(bh);
1757                 } else {
1758                         memset(buf, 0, csize);
1759                 }
1760
1761                 *offs += csize;
1762                 buf += csize;
1763                 size -= csize;
1764         }
1765         return osize;
1766 }
1767
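/*
 * Worked example for the copy loop above (a sketch, assuming a 4KB block
 * size): reading size = 6000 bytes at *offs = 2000 touches two blocks:
 *
 *	iteration 1: block 0, boffs = 2000, csize = min(4096 - 2000, 6000) = 2096
 *	iteration 2: block 1, boffs = 0,    csize = min(4096, 3904)        = 3904
 *
 * A hole (NULL buffer_head) is returned to the caller as zeroes.
 */
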
1768 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1769                         struct lu_buf *buf, loff_t *pos)
1770 {
1771         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1772         int rc;
1773
1774         /* Read small symlink from inode body as we need to maintain correct
1775          * on-disk symlinks for ldiskfs.
1776          */
1777         if (S_ISLNK(dt->do_lu.lo_header->loh_attr)) {
1778                 loff_t size = i_size_read(inode);
1779
1780                 if (buf->lb_len < size)
1781                         return -EOVERFLOW;
1782
1783                 if (size < sizeof(LDISKFS_I(inode)->i_data))
1784                         rc = osd_ldiskfs_readlink(inode, buf->lb_buf, size);
1785                 else
1786                         rc = osd_ldiskfs_read(inode, buf->lb_buf, size, pos);
1787         } else {
1788                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1789         }
1790
1791         return rc;
1792 }
1793
1794 static inline int osd_extents_enabled(struct super_block *sb,
1795                                       struct inode *inode)
1796 {
1797         if (inode != NULL) {
1798                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1799                         return 1;
1800         } else if (ldiskfs_has_feature_extents(sb)) {
1801                 return 1;
1802         }
1803         return 0;
1804 }
1805
1806 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1807                            const loff_t size, const loff_t pos,
1808                            const int blocks)
1809 {
1810         int credits, bits, bs, i;
1811
1812         bits = sb->s_blocksize_bits;
1813         bs = 1 << bits;
1814
1815         /* legacy blockmap: 3 levels * 3 (bitmap,gd,itself)
1816          * we do not expect blockmaps on large files,
1817          * so let's shrink it to 2 levels (enough for 4GB files)
1818          */
1819
1820         /* this is the default reservation: 2 levels */
1821         credits = (blocks + 2) * 3;
1822
1823         /* actual offset is unknown, hard to optimize */
1824         if (pos == -1)
1825                 return credits;
1826
1827         /* now check a few specific cases we can optimize */
1828         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1829                 /* no indirects */
1830                 credits = blocks;
1831                 /* allocate if not allocated */
1832                 if (inode == NULL) {
1833                         credits += blocks * 2;
1834                         return credits;
1835                 }
1836                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1837                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1838                         if (LDISKFS_I(inode)->i_data[i] == 0)
1839                                 credits += 2;
1840                 }
1841         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1842                 /* single indirect */
1843                 credits = blocks * 3;
1844                 if (inode == NULL ||
1845                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1846                         credits += 3;
1847                 else
1848                         /* The indirect block may be modified. */
1849                         credits += 1;
1850         }
1851
1852         return credits;
1853 }
1854
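/*
 * Worked example (a sketch, assuming a 4KB block size): writing blocks = 2
 * at pos = 0 into a not-yet-allocated inode (inode == NULL) falls into the
 * "no indirects" case above: credits = 2 + 2 * 2 = 6. With an unknown
 * offset (pos == -1) the default reservation applies instead:
 * credits = (2 + 2) * 3 = 12.
 */
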
1855 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1856                                  const struct lu_buf *buf, loff_t _pos,
1857                                  struct thandle *handle)
1858 {
1859         struct osd_object  *obj  = osd_dt_obj(dt);
1860         struct inode       *inode = obj->oo_inode;
1861         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1862         struct osd_thandle *oh;
1863         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1864         int                 bits, bs;
1865         int                 depth, size;
1866         loff_t              pos;
1867         ENTRY;
1868
1869         LASSERT(buf != NULL);
1870         LASSERT(handle != NULL);
1871
1872         oh = container_of(handle, struct osd_thandle, ot_super);
1873         LASSERT(oh->ot_handle == NULL);
1874
1875         size = buf->lb_len;
1876         bits = sb->s_blocksize_bits;
1877         bs = 1 << bits;
1878
1879         if (_pos == -1) {
1880                 /* if this is an append, then we
1881                  * should expect a cross-block record
1882                  */
1883                 pos = 0;
1884         } else {
1885                 pos = _pos;
1886         }
1887
1888         /* blocks to modify */
1889         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1890         LASSERT(blocks > 0);
1891
1892         if (inode != NULL && _pos != -1) {
1893                 /* object size in blocks */
1894                 est = (i_size_read(inode) + bs - 1) >> bits;
1895                 allocated = inode->i_blocks >> (bits - 9);
1896                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1897                         /* looks like an overwrite, no need to modify tree */
1898                         credits = blocks;
1899                         /* no need to modify i_size */
1900                         goto out;
1901                 }
1902         }
1903
1904         if (osd_extents_enabled(sb, inode)) {
1905                 /*
1906                  * many concurrent threads may grow the tree by the time
1907                  * our transaction starts, so consider 2 the minimum depth.
1908                  * for every level we may need to allocate a new block
1909                  * and take some entries from the old one: 3 blocks
1910                  * to allocate (bitmap, gd, the block itself) + the old
1911                  * block = 4 per level.
1912                  */
1913                 depth = inode != NULL ? ext_depth(inode) : 0;
1914                 depth = max(depth, 1) + 1;
1915                 credits = depth;
1916                 /* if not an append, then a split may need to modify
1917                  * existing blocks, moving entries into the new ones
1918                  */
1919                 if (_pos != -1)
1920                         credits += depth;
1921                 /* blocks to store data: bitmap,gd,itself */
1922                 credits += blocks * 3;
1923         } else {
1924                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1925         }
1926         /* if the inode is created as part of this transaction,
1927          * then it is already counted by the creation method
1928          */
1929         if (inode != NULL)
1930                 credits++;
1931
1932 out:
1933
1934         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1935
1936         /* dt_declare_write() is usually called for system objects, such
1937          * as llog or last_rcvd files. We needn't enforce quota on those
1938          * objects, so always set the lqi_space as 0.
1939          */
1940         if (inode != NULL)
1941                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1942                                            i_gid_read(inode),
1943                                            i_projid_read(inode), 0,
1944                                            oh, obj, NULL, OSD_QID_BLK);
1945
1946         if (rc == 0)
1947                 rc = osd_trunc_lock(obj, oh, true);
1948
1949         RETURN(rc);
1950 }
1951
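/*
 * Worked example (a sketch, assuming a 4KB block size and an extent-based
 * inode with ext_depth() == 0): appending a 100-byte llog record
 * (_pos == -1, so pos is taken as 0) gives blocks = 1 and
 *
 *	credits = depth (2) + blocks * 3 (3) + 1 (inode) = 6
 *
 * The extra "+ depth" for modifying existing blocks during a split only
 * applies to non-append writes, as noted in the comment above.
 */
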
1952 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1953 {
1954         /* LU-2634: clear the extent format for fast symlink */
1955         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1956
1957         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1958         spin_lock(&inode->i_lock);
1959         LDISKFS_I(inode)->i_disksize = buflen;
1960         i_size_write(inode, buflen);
1961         spin_unlock(&inode->i_lock);
1962         osd_dirty_inode(inode, I_DIRTY_DATASYNC);
1963
1964         return 0;
1965 }
1966
1967 static int osd_ldiskfs_write_record(struct dt_object *dt, void *buf,
1968                                     int bufsize, int write_NUL, loff_t *offs,
1969                                     handle_t *handle)
1970 {
1971         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1972         struct buffer_head *bh        = NULL;
1973         loff_t              offset    = *offs;
1974         loff_t              new_size  = i_size_read(inode);
1975         unsigned long       block;
1976         int                 blocksize = 1 << inode->i_blkbits;
1977         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1978         int                 err = 0;
1979         int                 size;
1980         int                 boffs;
1981         int                 dirty_inode = 0;
1982         bool create, sparse, sync = false;
1983
1984         if (write_NUL) {
1985                 /*
1986                  * a long symlink write does not count the NUL terminator in
1987                  * bufsize; we write it here, and the inode's file size does
1988                  * not count the NUL terminator either.
1989                  */
1990                 ((char *)buf)[bufsize] = '\0';
1991                 ++bufsize;
1992         }
1993
1994         /* only the first flag-set matters */
1995         dirty_inode = !test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA,
1996                                        &ei->i_flags);
1997
1998         /* sparse checking is racy, but sparse files are a rare case, leave as is */
1999         sparse = (new_size > 0 && (inode->i_blocks >> (inode->i_blkbits - 9)) <
2000                   ((new_size - 1) >> inode->i_blkbits) + 1);
2001
2002         while (bufsize > 0) {
2003                 int credits = handle->h_buffer_credits;
2004                 unsigned long last_block = (new_size == 0) ? 0 :
2005                                            (new_size - 1) >> inode->i_blkbits;
2006
2007                 if (bh)
2008                         brelse(bh);
2009
2010                 block = offset >> inode->i_blkbits;
2011                 boffs = offset & (blocksize - 1);
2012                 size = min(blocksize - boffs, bufsize);
2013                 sync = (block > last_block || new_size == 0 || sparse);
2014
2015                 if (sync)
2016                         down(&ei->i_append_sem);
2017
2018                 bh = __ldiskfs_bread(handle, inode, block, 0);
2019
2020                 if (unlikely(IS_ERR_OR_NULL(bh) && !sync))
2021                         CWARN(
2022                               "%s: adding bh without locking off %llu (block %lu, size %d, offs %llu)\n",
2023                               osd_ino2name(inode),
2024                               offset, block, bufsize, *offs);
2025
2026                 if (IS_ERR_OR_NULL(bh)) {
2027                         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2028                         int flags = LDISKFS_GET_BLOCKS_CREATE;
2029
2030                         /* while the file system is being mounted, avoid
2031                          * preallocation, otherwise mount can take a long
2032                          * time as the mballoc cache is cold.
2033                          * XXX: this is a workaround until we have a proper
2034                          *      fix in mballoc
2035                          * XXX: works with extent-based files only */
2036                         if (!osd->od_cl_seq)
2037                                 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
2038                         bh = __ldiskfs_bread(handle, inode, block, flags);
2039                         create = true;
2040                 } else {
2041                         if (sync) {
2042                                 up(&ei->i_append_sem);
2043                                 sync = false;
2044                         }
2045                         create = false;
2046                 }
2047                 if (IS_ERR_OR_NULL(bh)) {
2048                         if (bh == NULL) {
2049                                 err = -EIO;
2050                         } else {
2051                                 err = PTR_ERR(bh);
2052                                 bh = NULL;
2053                         }
2054
2055                         CERROR(
2056                                "%s: error reading offset %llu (block %lu, size %d, offs %llu), credits %d/%d: rc = %d\n",
2057                                osd_ino2name(inode), offset, block, bufsize,
2058                                *offs, credits, handle->h_buffer_credits, err);
2059                         break;
2060                 }
2061
2062                 err = ldiskfs_journal_get_write_access(handle, bh);
2063                 if (err) {
2064                         CERROR("journal_get_write_access() returned error %d\n",
2065                                err);
2066                         break;
2067                 }
2068                 LASSERTF(boffs + size <= bh->b_size,
2069                          "boffs %d size %d bh->b_size %lu\n",
2070                          boffs, size, (unsigned long)bh->b_size);
2071                 if (create) {
2072                         memset(bh->b_data, 0, bh->b_size);
2073                         if (sync) {
2074                                 up(&ei->i_append_sem);
2075                                 sync = false;
2076                         }
2077                 }
2078                 memcpy(bh->b_data + boffs, buf, size);
2079                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2080                 if (err)
2081                         break;
2082
2083                 if (offset + size > new_size)
2084                         new_size = offset + size;
2085                 offset += size;
2086                 bufsize -= size;
2087                 buf += size;
2088         }
2089         if (sync)
2090                 up(&ei->i_append_sem);
2091
2092         if (bh)
2093                 brelse(bh);
2094
2095         if (write_NUL)
2096                 --new_size;
2097         /* correct in-core and on-disk sizes */
2098         if (new_size > i_size_read(inode)) {
2099                 spin_lock(&inode->i_lock);
2100                 if (new_size > i_size_read(inode))
2101                         i_size_write(inode, new_size);
2102                 if (i_size_read(inode) > ei->i_disksize) {
2103                         ei->i_disksize = i_size_read(inode);
2104                         dirty_inode = 1;
2105                 }
2106                 spin_unlock(&inode->i_lock);
2107         }
2108         if (dirty_inode)
2109                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
2110
2111         if (err == 0)
2112                 *offs = offset;
2113         return err;
2114 }
2115
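/*
 * Example of the write_NUL convention above (a sketch): storing a long
 * symlink target "0123456789" passes bufsize = 10. Eleven bytes (including
 * the appended NUL) are written into the blocks, but new_size is decremented
 * again before the size update, so i_size remains 10 and never counts the
 * terminator.
 */
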
2116 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
2117                          const struct lu_buf *buf, loff_t *pos,
2118                          struct thandle *handle)
2119 {
2120         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
2121         struct osd_thandle      *oh;
2122         ssize_t                 result;
2123         int                     is_link;
2124
2125         LASSERT(dt_object_exists(dt));
2126
2127         LASSERT(handle != NULL);
2128         LASSERT(inode != NULL);
2129         dquot_initialize(inode);
2130
2131         /* XXX: don't check: one declared chunk can be used many times */
2132         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
2133
2134         oh = container_of(handle, struct osd_thandle, ot_super);
2135         LASSERT(oh->ot_handle->h_transaction != NULL);
2136         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
2137
2138         /* Write small symlink to inode body as we need to maintain correct
2139          * on-disk symlinks for ldiskfs.
2140          * Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
2141          * does not count it in.
2142          */
2143         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
2144         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
2145                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
2146         else
2147                 result = osd_ldiskfs_write_record(dt, buf->lb_buf, buf->lb_len,
2148                                                   is_link, pos, oh->ot_handle);
2149         if (result == 0)
2150                 result = buf->lb_len;
2151
2152         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
2153
2154         return result;
2155 }
2156
2157 static int osd_declare_fallocate(const struct lu_env *env,
2158                                  struct dt_object *dt, __u64 start, __u64 end,
2159                                  int mode, struct thandle *th)
2160 {
2161         struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
2162         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2163         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2164         long long quota_space = 0;
2165         /* 5 is max tree depth. (inode + 4 index blocks) */
2166         int depth = 5;
2167         int rc;
2168
2169         ENTRY;
2170
2171         /*
2172          * mode == 0 (which is standard prealloc) and PUNCH are supported.
2173          * The rest of the mode options are not supported yet.
2174          */
2175         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2176                 RETURN(-EOPNOTSUPP);
2177
2178         /* disable fallocate completely */
2179         if (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks < 0)
2180                 RETURN(-EOPNOTSUPP);
2181
2182         LASSERT(th);
2183         LASSERT(inode);
2184
2185         if (mode & FALLOC_FL_PUNCH_HOLE) {
2186                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
2187                                            i_gid_read(inode),
2188                                            i_projid_read(inode), 0, oh,
2189                                            osd_dt_obj(dt), NULL, OSD_QID_BLK);
2190                 if (rc == 0)
2191                         rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
2192                 RETURN(rc);
2193         }
2194
2195         /* quota space for metadata blocks;
2196          * an approximate estimate should be good enough.
2197          */
2198         quota_space += PAGE_SIZE;
2199         quota_space += depth * LDISKFS_BLOCK_SIZE(osd_sb(osd));
2200
2201         /* quota space should be reported in 1K blocks */
2202         quota_space = toqb(quota_space) + toqb(end - start) +
2203                       LDISKFS_META_TRANS_BLOCKS(inode->i_sb);
2204
2205         /* We don't need to reserve credits for the whole fallocate here.
2206          * We reserve space only for metadata; fallocate credits are
2207          * extended as required.
2208          */
2209         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2210                                    i_projid_read(inode), quota_space, oh,
2211                                    osd_dt_obj(dt), NULL, OSD_QID_BLK);
2212         RETURN(rc);
2213 }
2214
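/*
 * Worked example for the preallocation quota estimate above (a sketch,
 * assuming a 4KB block size and 4KB pages): for a 1MB fallocate
 * (end - start = 1MB),
 *
 *	quota_space = toqb(4KB + 5 * 4KB) + toqb(1MB)
 *	            + LDISKFS_META_TRANS_BLOCKS(sb)
 *	            = 24 + 1024 + LDISKFS_META_TRANS_BLOCKS(sb)  (1K blocks)
 *
 * i.e. dominated by the data blocks themselves plus a small metadata margin.
 */
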
2215 static int osd_fallocate_preallocate(const struct lu_env *env,
2216                                      struct dt_object *dt,
2217                                      __u64 start, __u64 end, int mode,
2218                                      struct thandle *th)
2219 {
2220         struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
2221         handle_t *handle = ldiskfs_journal_current_handle();
2222         unsigned int save_credits = oh->ot_credits;
2223         struct osd_object *obj = osd_dt_obj(dt);
2224         struct inode *inode = obj->oo_inode;
2225         struct ldiskfs_map_blocks map;
2226         unsigned int credits;
2227         ldiskfs_lblk_t blen;
2228         ldiskfs_lblk_t boff;
2229         loff_t new_size = 0;
2230         int depth = 0;
2231         int flags;
2232         int rc = 0;
2233
2234         ENTRY;
2235
2236         LASSERT(dt_object_exists(dt));
2237         LASSERT(osd_invariant(obj));
2238         LASSERT(inode != NULL);
2239
2240         CDEBUG(D_INODE, "fallocate: inode #%lu: start %llu end %llu mode %d\n",
2241                inode->i_ino, start, end, mode);
2242
2243         dquot_initialize(inode);
2244
2245         LASSERT(th);
2246
2247         boff = start >> inode->i_blkbits;
2248         blen = (ALIGN(end, 1 << inode->i_blkbits) >> inode->i_blkbits) - boff;
2249
2250         /* Create and mark new extents as either zero or unwritten */
2251         flags = osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks ?
2252                 LDISKFS_GET_BLOCKS_CREATE_ZERO :
2253                 LDISKFS_GET_BLOCKS_CREATE_UNWRIT_EXT;
2254         if (mode & FALLOC_FL_KEEP_SIZE)
2255                 flags |= LDISKFS_GET_BLOCKS_KEEP_SIZE;
2256
2257         inode_lock(inode);
2258
2259         /*
2260          * We support preallocation for extent-based files only.
2261          */
2262         if (!(ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)))
2263                 GOTO(out, rc = -EOPNOTSUPP);
2264
2265         if (!(mode & FALLOC_FL_KEEP_SIZE) && (end > i_size_read(inode) ||
2266             end > LDISKFS_I(inode)->i_disksize)) {
2267                 new_size = end;
2268                 rc = inode_newsize_ok(inode, new_size);
2269                 if (rc)
2270                         GOTO(out, rc);
2271         }
2272
2273         inode_dio_wait(inode);
2274
2275         map.m_lblk = boff;
2276         map.m_len = blen;
2277
2278         /* Don't normalize the request if it can fit in one extent so
2279          * that it doesn't get unnecessarily split into multiple extents.
2280          */
2281         if (blen <= EXT_UNWRITTEN_MAX_LEN)
2282                 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
2283
2284         /*
2285          * credits to insert 1 extent into the extent tree.
2286          */
2287         credits = osd_chunk_trans_blocks(inode, blen);
2288         depth = ext_depth(inode);
2289
2290         while (rc >= 0 && blen) {
2291                 loff_t epos;
2292
2293                 /*
2294                  * Recalculate credits when extent tree depth changes.
2295                  */
2296                 if (depth != ext_depth(inode)) {
2297                         credits = osd_chunk_trans_blocks(inode, blen);
2298                         depth = ext_depth(inode);
2299                 }
2300
2301                 /* TODO: quota check */
2302                 rc = osd_extend_restart_trans(handle, credits, inode);
2303                 if (rc)
2304                         break;
2305
2306                 rc = ldiskfs_map_blocks(handle, inode, &map, flags);
2307                 if (rc <= 0) {
2308                         CDEBUG(D_INODE,
2309                                "inode #%lu: block %u: len %u: ldiskfs_map_blocks returned %d\n",
2310                                inode->i_ino, map.m_lblk, map.m_len, rc);
2311                         ldiskfs_mark_inode_dirty(handle, inode);
2312                         break;
2313                 }
2314
2315                 map.m_lblk += rc;
2316                 map.m_len = blen = blen - rc;
2317                 epos = (loff_t)map.m_lblk << inode->i_blkbits;
2318                 inode->i_ctime = current_time(inode);
2319                 if (new_size) {
2320                         if (epos > end)
2321                                 epos = end;
2322                         if (ldiskfs_update_inode_size(inode, epos) & 0x1)
2323                                 inode->i_mtime = inode->i_ctime;
2324                 } else {
2325                         if (epos > inode->i_size)
2326                                 ldiskfs_set_inode_flag(inode,
2327                                                        LDISKFS_INODE_EOFBLOCKS);
2328                 }
2329
2330                 ldiskfs_mark_inode_dirty(handle, inode);
2331         }
2332
2333 out:
2334         /* extend credits if needed for operations such as attribute set */
2335         if (rc >= 0)
2336                 rc = osd_extend_restart_trans(handle, save_credits, inode);
2337
2338         inode_unlock(inode);
2339
2340         RETURN(rc);
2341 }
2342
2343 static int osd_fallocate_punch(const struct lu_env *env, struct dt_object *dt,
2344                                __u64 start, __u64 end, int mode,
2345                                struct thandle *th)
2346 {
2347         struct osd_object *obj = osd_dt_obj(dt);
2348         struct inode *inode = obj->oo_inode;
2349         struct osd_access_lock *al;
2350         struct osd_thandle *oh;
2351         int rc = 0, found = 0;
2352
2353         ENTRY;
2354
2355         LASSERT(dt_object_exists(dt));
2356         LASSERT(osd_invariant(obj));
2357         LASSERT(inode != NULL);
2358
2359         dquot_initialize(inode);
2360
2361         LASSERT(th);
2362         oh = container_of(th, struct osd_thandle, ot_super);
2363         LASSERT(oh->ot_handle->h_transaction != NULL);
2364
2365         list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2366                 if (obj != al->tl_obj)
2367                         continue;
2368                 LASSERT(al->tl_shared == 0);
2369                 found = 1;
2370                 /* do actual punch in osd_trans_stop() */
2371                 al->tl_start = start;
2372                 al->tl_end = end;
2373                 al->tl_mode = mode;
2374                 al->tl_punch = true;
2375                 break;
2376         }
2377
2378         RETURN(rc);
2379 }
2380
2381 static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
2382                          __u64 start, __u64 end, int mode, struct thandle *th)
2383 {
2384         int rc;
2385
2386         ENTRY;
2387
2388         if (mode & FALLOC_FL_PUNCH_HOLE) {
2389                 /* punch */
2390                 rc = osd_fallocate_punch(env, dt, start, end, mode, th);
2391         } else {
2392                 /* standard preallocate */
2393                 rc = osd_fallocate_preallocate(env, dt, start, end, mode, th);
2394         }
2395         RETURN(rc);
2396 }
2397
2398 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
2399                              __u64 start, __u64 end, struct thandle *th)
2400 {
2401         struct osd_thandle *oh;
2402         struct inode       *inode;
2403         int                 rc;
2404         ENTRY;
2405
2406         LASSERT(th);
2407         oh = container_of(th, struct osd_thandle, ot_super);
2408
2409         /*
2410          * we don't need to reserve credits for the whole truncate;
2411          * that is not possible, as truncate may need to free too many
2412          * blocks to fit a single transaction. instead we reserve
2413          * credits to change i_size and to put the inode onto the
2414          * orphan list. if needed, truncate will extend or restart
2415          * the transaction
2416          */
2417         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
2418                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
2419
2420         inode = osd_dt_obj(dt)->oo_inode;
2421         LASSERT(inode);
2422
2423         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2424                                    i_projid_read(inode), 0, oh, osd_dt_obj(dt),
2425                                    NULL, OSD_QID_BLK);
2426
2427         if (rc == 0)
2428                 rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
2429
2430         RETURN(rc);
2431 }
2432
2433 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
2434                      __u64 start, __u64 end, struct thandle *th)
2435 {
2436         struct osd_object *obj = osd_dt_obj(dt);
2437         struct osd_device *osd = osd_obj2dev(obj);
2438         struct inode *inode = obj->oo_inode;
2439         struct osd_access_lock *al;
2440         struct osd_thandle *oh;
2441         int rc = 0, found = 0;
2442         bool grow = false;
2443         ENTRY;
2444
2445         LASSERT(dt_object_exists(dt));
2446         LASSERT(osd_invariant(obj));
2447         LASSERT(inode != NULL);
2448         dquot_initialize(inode);
2449
2450         LASSERT(th);
2451         oh = container_of(th, struct osd_thandle, ot_super);
2452         LASSERT(oh->ot_handle->h_transaction != NULL);
2453
2454         /* we used to skip truncate to the current size to
2455          * optimize truncates on the OST. with DoM we can
2456          * get attr_set to set a specific size (MDS_REINT)
2457          * and then get a truncate RPC which would essentially
2458          * be skipped. this is bad, so disable this
2459          * optimization on the MDS until the client stops
2460          * sending MDS_REINT (LU-11033) -bzzz
2461          */
2462         if (osd->od_is_ost && i_size_read(inode) == start)
2463                 RETURN(0);
2464
2465         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
2466
2467         spin_lock(&inode->i_lock);
2468         if (i_size_read(inode) < start)
2469                 grow = true;
2470         i_size_write(inode, start);
2471         spin_unlock(&inode->i_lock);
2472         /* if object holds encrypted content, we need to make sure we truncate
2473          * on an encryption unit boundary, or subsequent reads will get
2474          * corrupted content
2475          */
2476         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2477             start & ~LUSTRE_ENCRYPTION_MASK)
2478                 start = (start & LUSTRE_ENCRYPTION_MASK) +
2479                         LUSTRE_ENCRYPTION_UNIT_SIZE;
2480         ll_truncate_pagecache(inode, start);
2481
2482         /* optimize grow case */
2483         if (grow) {
2484                 osd_execute_truncate(obj);
2485                 GOTO(out, rc);
2486         }
2487
2488         inode_lock(inode);
2489         /* add to the orphan list to ensure truncate completion
2490          * if this transaction succeeds. ldiskfs_truncate()
2491          * will take the inode out of the list
2492          */
2493         rc = ldiskfs_orphan_add(oh->ot_handle, inode);
2494         inode_unlock(inode);
2495         if (rc != 0)
2496                 GOTO(out, rc);
2497
2498         list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2499                 if (obj != al->tl_obj)
2500                         continue;
2501                 LASSERT(al->tl_shared == 0);
2502                 found = 1;
2503                 /* do actual truncate in osd_trans_stop() */
2504                 al->tl_truncate = 1;
2505                 break;
2506         }
2507         LASSERT(found);
2508
2509 out:
2510         RETURN(rc);
2511 }
2512
2513 static int fiemap_check_ranges(struct inode *inode,
2514                                u64 start, u64 len, u64 *new_len)
2515 {
2516         loff_t maxbytes;
2517
2518         *new_len = len;
2519
2520         if (len == 0)
2521                 return -EINVAL;
2522
2523         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
2524                 maxbytes = inode->i_sb->s_maxbytes;
2525         else
2526                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
2527
2528         if (start > maxbytes)
2529                 return -EFBIG;
2530
2531         /*
2532          * Shrink request scope to what the fs can actually handle.
2533          */
2534         if (len > maxbytes || (maxbytes - len) < start)
2535                 *new_len = maxbytes - start;
2536
2537         return 0;
2538 }
2539
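/*
 * Example of the clamping above (a sketch): for an extent-based inode the
 * limit is sb->s_maxbytes; a request with start = 0 and
 * len = FIEMAP_MAX_OFFSET is shrunk to *new_len = maxbytes - start, while a
 * start beyond maxbytes fails with -EFBIG and len == 0 fails with -EINVAL.
 */
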
2540 /* So that the fiemap access checks can't overflow on 32 bit machines. */
2541 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
2542
2543 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
2544                           struct fiemap *fm)
2545 {
2546         struct fiemap_extent_info fieinfo = {0, };
2547         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2548         u64 len;
2549         int rc;
2550
2551         LASSERT(inode);
2552         if (inode->i_op->fiemap == NULL)
2553                 return -EOPNOTSUPP;
2554
2555         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
2556                 return -EINVAL;
2557
2558         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
2559         if (rc)
2560                 return rc;
2561
2562         fieinfo.fi_flags = fm->fm_flags;
2563         fieinfo.fi_extents_max = fm->fm_extent_count;
2564         fieinfo.fi_extents_start = fm->fm_extents;
2565
2566         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
2567                 filemap_write_and_wait(inode->i_mapping);
2568
2569         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
2570         fm->fm_flags = fieinfo.fi_flags;
2571         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
2572
2573         return rc;
2574 }
2575
2576 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
2577                        __u64 start, __u64 end, enum lu_ladvise_type advice)
2578 {
2579         struct osd_object *obj = osd_dt_obj(dt);
2580         int rc = 0;
2581         ENTRY;
2582
2583         switch (advice) {
2584         case LU_LADVISE_DONTNEED:
2585                 if (end)
2586                         invalidate_mapping_pages(obj->oo_inode->i_mapping,
2587                                                  start >> PAGE_SHIFT,
2588                                                  (end - 1) >> PAGE_SHIFT);
2589                 break;
2590         default:
2591                 rc = -ENOTSUPP;
2592                 break;
2593         }
2594
2595         RETURN(rc);
2596 }
2597
2598 static loff_t osd_lseek(const struct lu_env *env, struct dt_object *dt,
2599                         loff_t offset, int whence)
2600 {
2601         struct osd_object *obj = osd_dt_obj(dt);
2602         struct inode *inode = obj->oo_inode;
2603         struct file *file;
2604         loff_t result;
2605
2606         ENTRY;
2607
2608         LASSERT(dt_object_exists(dt));
2609         LASSERT(osd_invariant(obj));
2610         LASSERT(inode);
2611         LASSERT(offset >= 0);
2612
2613         file = osd_quasi_file(env, inode);
2614         result = file->f_op->llseek(file, offset, whence);
2615
2616         /*
2617          * If 'offset' is beyond the end of the object file then treat it not
2618          * as an error but as a valid case for SEEK_HOLE and return 'offset'.
2619          * LOV will decide if it is beyond the real end of file or not.
2620          */
2621         if (whence == SEEK_HOLE && result == -ENXIO)
2622                 result = offset;
2623
2624         CDEBUG(D_INFO, "seek %s from %lld: %lld\n", whence == SEEK_HOLE ?
2625                        "hole" : "data", offset, result);
2626         RETURN(result);
2627 }
2628
2629 /*
2630  * in some cases we may need the declare methods for objects being created,
2631  * e.g., when we create a symlink
2632  */
2633 const struct dt_body_operations osd_body_ops_new = {
2634         .dbo_declare_write = osd_declare_write,
2635 };
2636
2637 const struct dt_body_operations osd_body_ops = {
2638         .dbo_read                       = osd_read,
2639         .dbo_declare_write              = osd_declare_write,
2640         .dbo_write                      = osd_write,
2641         .dbo_bufs_get                   = osd_bufs_get,
2642         .dbo_bufs_put                   = osd_bufs_put,
2643         .dbo_write_prep                 = osd_write_prep,
2644         .dbo_declare_write_commit       = osd_declare_write_commit,
2645         .dbo_write_commit               = osd_write_commit,
2646         .dbo_read_prep                  = osd_read_prep,
2647         .dbo_declare_punch              = osd_declare_punch,
2648         .dbo_punch                      = osd_punch,
2649         .dbo_fiemap_get                 = osd_fiemap_get,
2650         .dbo_ladvise                    = osd_ladvise,
2651         .dbo_declare_fallocate          = osd_declare_fallocate,
2652         .dbo_fallocate                  = osd_fallocate,
2653         .dbo_lseek                      = osd_lseek,
2654 };
2655
2656 /**
2657  * Get a truncate lock
2658  *
2659  * In order to take a multi-transaction truncate out of the main transaction
2660  * we let the caller grab a lock on the object passed. The lock can be shared
2661  * (for writes) or exclusive (for truncate). It's not allowed to mix truncate
2662  * and write in the same transaction handle (not to be confused with a big
2663  * ldiskfs transaction containing lots of handles).
2664  * The lock must be taken at declaration.
2665  *
2666  * \param obj           object to lock
2667  * \param oh            transaction handle
2668  * \param shared        shared or exclusive
2669  *
2670  * \retval 0            lock is granted
2671  * \retval -ENOMEM      no memory to allocate lock
2672  */
2673 int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
2674 {
2675         struct osd_access_lock *al, *tmp;
2676
2677         LASSERT(obj);
2678         LASSERT(oh);
2679
2680         list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
2681                 if (tmp->tl_obj != obj)
2682                         continue;
2683                 LASSERT(tmp->tl_shared == shared);
2684                 /* found same lock */
2685                 return 0;
2686         }
2687
2688         OBD_ALLOC_PTR(al);
2689         if (unlikely(al == NULL))
2690                 return -ENOMEM;
2691         al->tl_obj = obj;
2692         al->tl_truncate = false;
2693         if (shared)
2694                 down_read(&obj->oo_ext_idx_sem);
2695         else
2696                 down_write(&obj->oo_ext_idx_sem);
2697         al->tl_shared = shared;
2698         lu_object_get(&obj->oo_dt.do_lu);
2699
2700         list_add(&al->tl_list, &oh->ot_trunc_locks);
2701
2702         return 0;
2703 }
2704
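/*
 * Typical pairing in this file (a sketch, see the declare methods above):
 * write declarations take the lock shared, truncate and punch declarations
 * take it exclusive, and osd_trunc_unlock_all() below releases every lock
 * held on the handle's ot_trunc_locks list once the transaction is finished:
 *
 *	osd_trunc_lock(obj, oh, true);     shared, e.g. osd_declare_write_commit()
 *	osd_trunc_lock(obj, oh, false);    exclusive, e.g. osd_declare_punch()
 *	...
 *	osd_trunc_unlock_all(env, &oh->ot_trunc_locks);
 */
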
2705 void osd_trunc_unlock_all(const struct lu_env *env, struct list_head *list)
2706 {
2707         struct osd_access_lock *al, *tmp;
2708
2709         list_for_each_entry_safe(al, tmp, list, tl_list) {
2710                 if (al->tl_shared)
2711                         up_read(&al->tl_obj->oo_ext_idx_sem);
2712                 else
2713                         up_write(&al->tl_obj->oo_ext_idx_sem);
2714                 osd_object_put(env, al->tl_obj);
2715                 list_del(&al->tl_list);
2716                 OBD_FREE_PTR(al);
2717         }
2718 }
2719
2720 /*
2721  * For a partial-page truncate, flush the page to disk immediately to
2722  * avoid data corruption during direct disk write.  b=17397
2723  */
2724 static void osd_partial_page_flush(struct osd_device *d, struct inode *inode,
2725                                    loff_t offset)
2726 {
2727         if (!(offset & ~PAGE_MASK))
2728                 return;
2729
2730         if (osd_use_page_cache(d)) {
2731                 filemap_fdatawrite_range(inode->i_mapping, offset, offset + 1);
2732         } else {
2733                 /* Notice we use the "wait" version to ensure I/O is complete */
2734                 filemap_write_and_wait_range(inode->i_mapping, offset,
2735                                              offset + 1);
2736                 invalidate_mapping_pages(inode->i_mapping, offset >> PAGE_SHIFT,
2737                                          offset >> PAGE_SHIFT);
2738         }
2739 }
2740
2741 void osd_execute_truncate(struct osd_object *obj)
2742 {
2743         struct osd_device *d = osd_obj2dev(obj);
2744         struct inode *inode = obj->oo_inode;
2745         __u64 size;
2746
2747         /* simulate a crash before (or in the middle of) the delayed truncate */
2748         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
2749                 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2750                 struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);
2751
2752                 mutex_lock(&sbi->s_orphan_lock);
2753                 list_del_init(&ei->i_orphan);
2754                 mutex_unlock(&sbi->s_orphan_lock);
2755                 return;
2756         }
2757
2758         size = i_size_read(inode);
2759         inode_lock(inode);
2760         /* if object holds encrypted content, we need to make sure we truncate
2761          * on an encryption unit boundary, or block content will get corrupted
2762          */
2763         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2764             size & ~LUSTRE_ENCRYPTION_MASK)
2765                 inode->i_size = (size & LUSTRE_ENCRYPTION_MASK) +
2766                         LUSTRE_ENCRYPTION_UNIT_SIZE;
2767         ldiskfs_truncate(inode);
2768         inode_unlock(inode);
2769         if (inode->i_size != size) {
2770                 spin_lock(&inode->i_lock);
2771                 i_size_write(inode, size);
2772                 LDISKFS_I(inode)->i_disksize = size;
2773                 spin_unlock(&inode->i_lock);
2774                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
2775         }
2776         osd_partial_page_flush(d, inode, size);
2777 }
2778
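/*
 * Note on the encryption rounding above (a sketch): if i_size is not a
 * multiple of LUSTRE_ENCRYPTION_UNIT_SIZE, ldiskfs_truncate() is run against
 * the size rounded up to the next encryption unit, and the real (smaller)
 * i_size/i_disksize is written back afterwards, so no partially truncated
 * encryption unit is left behind.
 */
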
2779 void osd_execute_punch(const struct lu_env *env, struct osd_object *obj,
2780                        loff_t start, loff_t end, int mode)
2781 {
2782         struct osd_device *d = osd_obj2dev(obj);
2783         struct inode *inode = obj->oo_inode;
2784         struct file *file = osd_quasi_file(env, inode);
2785
2786         file->f_op->fallocate(file, mode, start, end - start);
2787         osd_partial_page_flush(d, inode, start);
2788         osd_partial_page_flush(d, inode, end - 1);
2789 }
2790
2791 void osd_process_truncates(const struct lu_env *env, struct list_head *list)
2792 {
2793         struct osd_access_lock *al;
2794
2795         LASSERT(journal_current_handle() == NULL);
2796
2797         list_for_each_entry(al, list, tl_list) {
2798                 if (al->tl_shared)
2799                         continue;
2800                 if (al->tl_truncate)
2801                         osd_execute_truncate(al->tl_obj);
2802                 else if (al->tl_punch)
2803                         osd_execute_punch(env, al->tl_obj, al->tl_start,
2804                                           al->tl_end, al->tl_mode);
2805         }
2806 }