LU-14641 osd-ldiskfs: write commit declaring improvement
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_io.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/osd/osd_io.c
32  *
33  * body operations
34  *
35  * Author: Nikita Danilov <nikita@clusterfs.com>
36  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
37  *
38  */
39
40 #define DEBUG_SUBSYSTEM S_OSD
41
42 /* prerequisite for linux/xattr.h */
43 #include <linux/types.h>
44 /* prerequisite for linux/xattr.h */
45 #include <linux/fs.h>
46 #include <linux/mm.h>
47 #include <linux/pagevec.h>
48
49 /*
50  * struct OBD_{ALLOC,FREE}*()
51  * OBD_FAIL_CHECK
52  */
53 #include <obd_support.h>
54
55 #include "osd_internal.h"
56
57 /* ext_depth() */
58 #include <ldiskfs/ldiskfs_extents.h>
59
60 static inline bool osd_use_page_cache(struct osd_device *d)
61 {
62         /* do not use pagecache if write and read caching are disabled */
63         if (d->od_writethrough_cache + d->od_read_cache == 0)
64                 return false;
65         /* use pagecache by default */
66         return true;
67 }
68
69 static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
70                             int rw, int line, int pages)
71 {
72         int blocks, i;
73
74         LASSERTF(iobuf->dr_elapsed_valid == 0,
75                  "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
76                  atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
77                  iobuf->dr_init_at);
78         LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
79
80         init_waitqueue_head(&iobuf->dr_wait);
81         atomic_set(&iobuf->dr_numreqs, 0);
82         iobuf->dr_npages = 0;
83         iobuf->dr_error = 0;
84         iobuf->dr_dev = d;
85         iobuf->dr_frags = 0;
86         iobuf->dr_elapsed = ktime_set(0, 0);
87         /* must be counted before, so assert */
88         iobuf->dr_rw = rw;
89         iobuf->dr_init_at = line;
90
91         blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
92         if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
93                 LASSERT(iobuf->dr_pg_buf.lb_len >=
94                         pages * sizeof(iobuf->dr_pages[0]));
95                 return 0;
96         }
97
98         /* start with 1MB for 4K blocks */
99         i = 256;
100         while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
101                 i <<= 1;
102
103         CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
104                (unsigned int)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
105         pages = i;
106         blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
107         iobuf->dr_max_pages = 0;
108         CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
109                (unsigned int)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
110
111         lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
112         iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
113         if (unlikely(iobuf->dr_blocks == NULL))
114                 return -ENOMEM;
115
116         lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
117         iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
118         if (unlikely(iobuf->dr_pages == NULL))
119                 return -ENOMEM;
120
121         lu_buf_realloc(&iobuf->dr_lnb_buf,
122                        pages * sizeof(iobuf->dr_lnbs[0]));
123         iobuf->dr_lnbs = iobuf->dr_lnb_buf.lb_buf;
124         if (unlikely(iobuf->dr_lnbs == NULL))
125                 return -ENOMEM;
126
127         iobuf->dr_max_pages = pages;
128
129         return 0;
130 }
131 #define osd_init_iobuf(dev, iobuf, rw, pages) \
132         __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
133
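/*
 * For illustration (an assumed configuration of 4KB blocks and 4KB pages,
 * i.e. one block per page): the initial 256-page preallocation covers a 1MB
 * transfer.  A request for, say, 300 pages doubles that to 512 pages (2MB)
 * along with the matching block and lnb arrays, so later requests up to that
 * size take the early-return path and reuse the buffers without reallocating.
 */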
134 static void osd_iobuf_add_page(struct osd_iobuf *iobuf,
135                                struct niobuf_local *lnb)
136 {
137         LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
138         iobuf->dr_pages[iobuf->dr_npages] = lnb->lnb_page;
139         iobuf->dr_lnbs[iobuf->dr_npages] = lnb;
140         iobuf->dr_npages++;
141 }
142
143 void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
144 {
145         int rw = iobuf->dr_rw;
146
147         if (iobuf->dr_elapsed_valid) {
148                 iobuf->dr_elapsed_valid = 0;
149                 LASSERT(iobuf->dr_dev == d);
150                 LASSERT(iobuf->dr_frags > 0);
151                 lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS+rw],
152                                  iobuf->dr_frags);
153                 lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
154                                       ktime_to_ms(iobuf->dr_elapsed));
155         }
156 }
157
158 #ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
159 static void dio_complete_routine(struct bio *bio)
160 {
161         int error = blk_status_to_errno(bio->bi_status);
162 #else
163 static void dio_complete_routine(struct bio *bio, int error)
164 {
165 #endif
166         struct osd_iobuf *iobuf = bio->bi_private;
167         struct bio_vec *bvl;
168
169         /* CAVEAT EMPTOR: possibly in IRQ context
170          * DO NOT record procfs stats here!!!
171          */
172
173         if (unlikely(iobuf == NULL)) {
174                 CERROR("***** bio->bi_private is NULL!  This should never happen.  Normally, I would crash here, but instead I will dump the bio contents to the console.  Please report this to <https://jira.whamcloud.com/> , along with any interesting messages leading up to this point (like SCSI errors, perhaps).  Because bi_private is NULL, I can't wake up the thread that initiated this IO - you will probably have to reboot this node.\n");
175                 CERROR("bi_next: %p, bi_flags: %lx, " __stringify(bi_opf)
176                        ": %x, bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, bi_private: %p\n",
177                        bio->bi_next, (unsigned long)bio->bi_flags,
178                        (unsigned int)bio->bi_opf, bio->bi_vcnt, bio_idx(bio),
179                        bio_sectors(bio) << 9, bio->bi_end_io,
180                        atomic_read(&bio->__bi_cnt),
181                        bio->bi_private);
182                 return;
183         }
184
185         /* the check is outside of the loop for performance reasons -bzzz */
186         if (!bio_data_dir(bio)) {
187                 DECLARE_BVEC_ITER_ALL(iter_all);
188
189                 bio_for_each_segment_all(bvl, bio, iter_all) {
190                         if (likely(error == 0))
191                                 SetPageUptodate(bvl_to_page(bvl));
192                         LASSERT(PageLocked(bvl_to_page(bvl)));
193                 }
194                 atomic_dec(&iobuf->dr_dev->od_r_in_flight);
195         } else {
196                 atomic_dec(&iobuf->dr_dev->od_w_in_flight);
197         }
198
199         /* any real error is good enough -bzzz */
200         if (error != 0 && iobuf->dr_error == 0)
201                 iobuf->dr_error = error;
202
203         /*
204          * set dr_elapsed before dr_numreqs drops to 0; otherwise it is
205          * possible that the service thread will see dr_numreqs == 0
206          * while dr_elapsed is not yet set, losing the timing data for
207          * this IO and triggering an assertion in a subsequent call
208          * into the OSD.
209          */
210         if (atomic_read(&iobuf->dr_numreqs) == 1) {
211                 ktime_t now = ktime_get();
212
213                 iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
214                 iobuf->dr_elapsed_valid = 1;
215         }
216         if (atomic_dec_and_test(&iobuf->dr_numreqs))
217                 wake_up(&iobuf->dr_wait);
218
219         /* Completed bios used to be chained off iobuf->dr_bios and freed in
220          * filter_clear_dreq().  It was then possible to exhaust the biovec-256
221          * mempool when serious on-disk fragmentation was encountered,
222          * deadlocking the OST.  The bios are now released as soon as complete
223          * so the pool cannot be exhausted while IOs are competing. b=10076
224          */
225         bio_put(bio);
226 }
227
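/*
 * Completion accounting, restated: record_start_io() bumps dr_numreqs once
 * per submitted bio and dio_complete_routine() decrements it once per
 * completion.  The last completing bio (dr_numreqs == 1 on entry) stamps
 * dr_elapsed before the final decrement, so a waiter woken on
 * dr_numreqs == 0 always sees a valid dr_elapsed in osd_fini_iobuf().
 */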
228 static void record_start_io(struct osd_iobuf *iobuf, int size)
229 {
230         struct osd_device    *osd = iobuf->dr_dev;
231         struct obd_histogram *h = osd->od_brw_stats.hist;
232
233         iobuf->dr_frags++;
234         atomic_inc(&iobuf->dr_numreqs);
235
236         if (iobuf->dr_rw == 0) {
237                 atomic_inc(&osd->od_r_in_flight);
238                 lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
239                                  atomic_read(&osd->od_r_in_flight));
240                 lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
241         } else if (iobuf->dr_rw == 1) {
242                 atomic_inc(&osd->od_w_in_flight);
243                 lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
244                                  atomic_read(&osd->od_w_in_flight));
245                 lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
246         } else {
247                 LBUG();
248         }
249 }
250
251 static void osd_submit_bio(int rw, struct bio *bio)
252 {
253         LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
254 #ifdef HAVE_SUBMIT_BIO_2ARGS
255         submit_bio(rw ? WRITE : READ, bio);
256 #else
257         bio->bi_opf |= rw;
258         submit_bio(bio);
259 #endif
260 }
261
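/*
 * Note: older kernels pass the request direction as a separate submit_bio()
 * argument, while newer ones expect it encoded in bio->bi_opf; the
 * HAVE_SUBMIT_BIO_2ARGS configure check selects the matching form above.
 */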
262 static int can_be_merged(struct bio *bio, sector_t sector)
263 {
264         if (bio == NULL)
265                 return 0;
266
267         return bio_end_sector(bio) == sector ? 1 : 0;
268 }
269
270 #if IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)
271 /*
272  * This function modifies the data being written, so it must only be
273  * used when testing the data integrity feature.
274  */
275 static void bio_integrity_fault_inject(struct bio *bio)
276 {
277         struct bio_vec *bvec;
278         DECLARE_BVEC_ITER_ALL(iter_all);
279         void *kaddr;
280         char *addr;
281
282         bio_for_each_segment_all(bvec, bio, iter_all) {
283                 struct page *page = bvec->bv_page;
284
285                 kaddr = kmap(page);
286                 addr = kaddr;
287                 *addr = ~(*addr);
288                 kunmap(page);
289                 break;
290         }
291 }
292
293 static int bio_dif_compare(__u16 *expected_guard_buf, void *bio_prot_buf,
294                            unsigned int sectors, int tuple_size)
295 {
296         __u16 *expected_guard;
297         __u16 *bio_guard;
298         int i;
299
300         expected_guard = expected_guard_buf;
301         for (i = 0; i < sectors; i++) {
302                 bio_guard = (__u16 *)bio_prot_buf;
303                 if (*bio_guard != *expected_guard) {
304                         CERROR(
305                                "unexpected guard tags on sector %d expected guard %u, bio guard %u, sectors %u, tuple size %d\n",
306                                i, *expected_guard, *bio_guard, sectors,
307                                tuple_size);
308                         return -EIO;
309                 }
310                 expected_guard++;
311                 bio_prot_buf += tuple_size;
312         }
313         return 0;
314 }
315
316 static int osd_bio_integrity_compare(struct bio *bio, struct block_device *bdev,
317                                      struct osd_iobuf *iobuf, int index)
318 {
319         struct blk_integrity *bi = bdev_get_integrity(bdev);
320         struct bio_integrity_payload *bip = bio->bi_integrity;
321         struct niobuf_local *lnb;
322         unsigned short sector_size = blk_integrity_interval(bi);
323         void *bio_prot_buf = page_address(bip->bip_vec->bv_page) +
324                 bip->bip_vec->bv_offset;
325         struct bio_vec *bv;
326         sector_t sector = bio_start_sector(bio);
327         unsigned int sectors, total;
328         DECLARE_BVEC_ITER_ALL(iter_all);
329         __u16 *expected_guard;
330         int rc;
331
332         total = 0;
333         bio_for_each_segment_all(bv, bio, iter_all) {
334                 lnb = iobuf->dr_lnbs[index];
335                 expected_guard = lnb->lnb_guards;
336                 sectors = bv->bv_len / sector_size;
337                 if (lnb->lnb_guard_rpc) {
338                         rc = bio_dif_compare(expected_guard, bio_prot_buf,
339                                              sectors, bi->tuple_size);
340                         if (rc)
341                                 return rc;
342                 }
343
344                 sector += sectors;
345                 bio_prot_buf += sectors * bi->tuple_size;
346                 total += sectors * bi->tuple_size;
347                 LASSERT(total <= bip_size(bio->bi_integrity));
348                 index++;
349         }
350         return 0;
351 }
352
353 static int osd_bio_integrity_handle(struct osd_device *osd, struct bio *bio,
354                                     struct osd_iobuf *iobuf,
355                                     int start_page_idx, bool fault_inject,
356                                     bool integrity_enabled)
357 {
358         struct super_block *sb = osd_sb(osd);
359         integrity_gen_fn *generate_fn = NULL;
360         integrity_vrfy_fn *verify_fn = NULL;
361         int rc;
362
363         ENTRY;
364
365         if (!integrity_enabled)
366                 RETURN(0);
367
368         rc = osd_get_integrity_profile(osd, &generate_fn, &verify_fn);
369         if (rc)
370                 RETURN(rc);
371
372         rc = bio_integrity_prep_fn(bio, generate_fn, verify_fn);
373         if (rc)
374                 RETURN(rc);
375
376         /* Verify and inject fault only when writing */
377         if (iobuf->dr_rw == 1) {
378                 if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_CMP))) {
379                         rc = osd_bio_integrity_compare(bio, sb->s_bdev, iobuf,
380                                                        start_page_idx);
381                         if (rc)
382                                 RETURN(rc);
383                 }
384
385                 if (unlikely(fault_inject))
386                         bio_integrity_fault_inject(bio);
387         }
388
389         RETURN(0);
390 }
391
392 #ifdef HAVE_BIO_INTEGRITY_PREP_FN
393 #  ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
394 static void dio_integrity_complete_routine(struct bio *bio)
395 #  else
396 static void dio_integrity_complete_routine(struct bio *bio, int error)
397 #  endif
398 {
399         struct osd_bio_private *bio_private = bio->bi_private;
400
401         bio->bi_private = bio_private->obp_iobuf;
402         osd_dio_complete_routine(bio, error);
403
404         OBD_FREE_PTR(bio_private);
405 }
406 #endif /* HAVE_BIO_INTEGRITY_PREP_FN */
407 #else  /* !CONFIG_BLK_DEV_INTEGRITY */
408 #define osd_bio_integrity_handle(osd, bio, iobuf, start_page_idx, \
409                                  fault_inject, integrity_enabled) 0
410 #endif /* CONFIG_BLK_DEV_INTEGRITY */
411
412 static int osd_bio_init(struct bio *bio, struct osd_iobuf *iobuf,
413                         bool integrity_enabled, int start_page_idx,
414                         struct osd_bio_private **pprivate)
415 {
416         ENTRY;
417
418         *pprivate = NULL;
419
420 #ifdef HAVE_BIO_INTEGRITY_PREP_FN
421         if (integrity_enabled) {
422                 struct osd_bio_private *bio_private = NULL;
423
424                 OBD_ALLOC_GFP(bio_private, sizeof(*bio_private), GFP_NOIO);
425                 if (bio_private == NULL)
426                         RETURN(-ENOMEM);
427                 bio->bi_end_io = dio_integrity_complete_routine;
428                 bio->bi_private = bio_private;
429                 bio_private->obp_start_page_idx = start_page_idx;
430                 bio_private->obp_iobuf = iobuf;
431                 *pprivate = bio_private;
432         } else
433 #endif
434         {
435                 bio->bi_end_io = dio_complete_routine;
436                 bio->bi_private = iobuf;
437         }
438
439         RETURN(0);
440 }
441
442 static void osd_mark_page_io_done(struct osd_iobuf *iobuf,
443                                   struct inode *inode,
444                                   sector_t start_blocks,
445                                   sector_t count)
446 {
447         struct niobuf_local *lnb;
448         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
449         pgoff_t pg_start, pg_end;
450
451         pg_start = start_blocks / blocks_per_page;
452         if (start_blocks % blocks_per_page)
453                 pg_start++;
454         if (count >= blocks_per_page)
455                 pg_end = (start_blocks + count -
456                           blocks_per_page) / blocks_per_page;
457         else
458                 return; /* nothing to mark */
459         for ( ; pg_start <= pg_end; pg_start++) {
460                 lnb = iobuf->dr_lnbs[pg_start];
461                 lnb->lnb_flags |= OBD_BRW_DONE;
462         }
463 }
464
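/*
 * For illustration (assuming 1KB blocks and 4KB pages, i.e. 4 blocks per
 * page): start_blocks = 6 and count = 10 cover blocks 6..15, so pg_start
 * becomes 2 (the first fully covered page) and pg_end = (6 + 10 - 4) / 4 = 3.
 * Only the lnbs for pages 2 and 3 get OBD_BRW_DONE; the partially covered
 * page 1 is intentionally left unmarked.
 */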
465 static int osd_do_bio(struct osd_device *osd, struct inode *inode,
466                       struct osd_iobuf *iobuf, sector_t start_blocks,
467                       sector_t count)
468 {
469         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
470         struct page **pages = iobuf->dr_pages;
471         int npages = iobuf->dr_npages;
472         sector_t *blocks = iobuf->dr_blocks;
473         struct super_block *sb = inode->i_sb;
474         int sector_bits = sb->s_blocksize_bits - 9;
475         unsigned int blocksize = sb->s_blocksize;
476         struct block_device *bdev = sb->s_bdev;
477         struct osd_bio_private *bio_private = NULL;
478         struct bio *bio = NULL;
479         int bio_start_page_idx;
480         struct page *page;
481         unsigned int page_offset;
482         sector_t sector;
483         int nblocks;
484         int block_idx, block_idx_end;
485         int page_idx, page_idx_start;
486         int i;
487         int rc = 0;
488         bool fault_inject;
489         bool integrity_enabled;
490         struct blk_plug plug;
491         int blocks_left_page;
492
493         ENTRY;
494
495         fault_inject = OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
496         LASSERT(iobuf->dr_npages == npages);
497
498         integrity_enabled = bdev_integrity_enabled(bdev, iobuf->dr_rw);
499
500         osd_brw_stats_update(osd, iobuf);
501         iobuf->dr_start_time = ktime_get();
502
503         if (!count)
504                 count = npages * blocks_per_page;
505         block_idx_end = start_blocks + count;
506
507         blk_start_plug(&plug);
508
509         page_idx_start = start_blocks / blocks_per_page;
510         for (page_idx = page_idx_start, block_idx = start_blocks;
511              block_idx < block_idx_end; page_idx++,
512              block_idx += blocks_left_page) {
513                 page = pages[page_idx];
514                 LASSERT(page_idx < iobuf->dr_npages);
515
516                 i = block_idx % blocks_per_page;
517                 blocks_left_page = blocks_per_page - i;
518                 for (page_offset = i * blocksize; i < blocks_left_page;
519                      i += nblocks, page_offset += blocksize * nblocks) {
520                         nblocks = 1;
521
522                         if (blocks[block_idx + i] == 0) {  /* hole */
523                                 LASSERTF(iobuf->dr_rw == 0,
524                                          "page_idx %u, block_idx %u, i %u, "
525                                          "start_blocks: %llu, count: %llu, npages: %d\n",
526                                          page_idx, block_idx, i,
527                                          (unsigned long long)start_blocks,
528                                          (unsigned long long)count, npages);
529                                 memset(kmap(page) + page_offset, 0, blocksize);
530                                 kunmap(page);
531                                 continue;
532                         }
533
534                         sector = (sector_t)blocks[block_idx + i] << sector_bits;
535
536                         /* Additional contiguous file blocks? */
537                         while (i + nblocks < blocks_left_page &&
538                                (sector + (nblocks << sector_bits)) ==
539                                ((sector_t)blocks[block_idx + i + nblocks] <<
540                                  sector_bits))
541                                 nblocks++;
542
543                         if (bio && can_be_merged(bio, sector) &&
544                             bio_add_page(bio, page, blocksize * nblocks,
545                                          page_offset) != 0)
546                                 continue;       /* added this frag OK */
547
548                         if (bio != NULL) {
549                                 struct request_queue *q = bio_get_queue(bio);
550                                 unsigned int bi_size = bio_sectors(bio) << 9;
551
552                                 /* Dang! I have to fragment this I/O */
553                                 CDEBUG(D_INODE,
554                                        "bio++ sz %d vcnt %d(%d) sectors %d(%d) psg %d(%d)\n",
555                                        bi_size, bio->bi_vcnt, bio->bi_max_vecs,
556                                        bio_sectors(bio),
557                                        queue_max_sectors(q),
558                                        osd_bio_nr_segs(bio),
559                                        queue_max_segments(q));
560                                 rc = osd_bio_integrity_handle(osd, bio,
561                                         iobuf, bio_start_page_idx,
562                                         fault_inject, integrity_enabled);
563                                 if (rc) {
564                                         bio_put(bio);
565                                         goto out;
566                                 }
567
568                                 record_start_io(iobuf, bi_size);
569                                 osd_submit_bio(iobuf->dr_rw, bio);
570                         }
571
572                         bio_start_page_idx = page_idx;
573                         /* allocate new bio */
574                         bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
575                                         (block_idx_end - block_idx +
576                                          blocks_left_page - 1)));
577                         if (bio == NULL) {
578                                 CERROR("Can't allocate bio %u pages\n",
579                                        block_idx_end - block_idx +
580                                        blocks_left_page - 1);
581                                 rc = -ENOMEM;
582                                 goto out;
583                         }
584
585                         bio_set_dev(bio, bdev);
586                         bio_set_sector(bio, sector);
587                         bio->bi_opf = iobuf->dr_rw ? WRITE : READ;
588                         rc = osd_bio_init(bio, iobuf, integrity_enabled,
589                                           bio_start_page_idx, &bio_private);
590                         if (rc) {
591                                 bio_put(bio);
592                                 goto out;
593                         }
594
595                         rc = bio_add_page(bio, page,
596                                           blocksize * nblocks, page_offset);
597                         LASSERT(rc != 0);
598                 }
599         }
600
601         if (bio != NULL) {
602                 rc = osd_bio_integrity_handle(osd, bio, iobuf,
603                                               bio_start_page_idx,
604                                               fault_inject,
605                                               integrity_enabled);
606                 if (rc) {
607                         bio_put(bio);
608                         goto out;
609                 }
610
611                 record_start_io(iobuf, bio_sectors(bio) << 9);
612                 osd_submit_bio(iobuf->dr_rw, bio);
613                 rc = 0;
614         }
615
616 out:
617         blk_finish_plug(&plug);
618
619         /* in order to achieve better IO throughput, we don't wait for write
620          * completion here. Instead we proceed with the transaction commit in
621          * parallel and wait for IO completion once the transaction is stopped;
622          * see osd_trans_stop() for more details -bzzz
623          */
624         if (iobuf->dr_rw == 0 || fault_inject) {
625                 wait_event(iobuf->dr_wait,
626                            atomic_read(&iobuf->dr_numreqs) == 0);
627                 osd_fini_iobuf(osd, iobuf);
628         }
629
630         if (rc == 0) {
631                 rc = iobuf->dr_error;
632         } else {
633                 if (bio_private)
634                         OBD_FREE_PTR(bio_private);
635         }
636
637         /* for successful writes only: mark the pages covered by this IO as done */
638         if (rc == 0 && iobuf->dr_rw)
639                 osd_mark_page_io_done(iobuf, inode,
640                                       start_blocks, count);
641
642         RETURN(rc);
643 }
644
645 static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
646                                    struct niobuf_local *lnb, int maxlnb)
647 {
648         int rc = 0;
649         ENTRY;
650
651         *nrpages = 0;
652
653         while (len > 0) {
654                 int poff = offset & (PAGE_SIZE - 1);
655                 int plen = PAGE_SIZE - poff;
656
657                 if (*nrpages >= maxlnb) {
658                         rc = -EOVERFLOW;
659                         break;
660                 }
661
662                 if (plen > len)
663                         plen = len;
664                 lnb->lnb_file_offset = offset;
665                 lnb->lnb_page_offset = poff;
666                 lnb->lnb_len = plen;
667                 /* lnb->lnb_flags = rnb->rnb_flags; */
668                 lnb->lnb_flags = 0;
669                 lnb->lnb_page = NULL;
670                 lnb->lnb_rc = 0;
671                 lnb->lnb_guard_rpc = 0;
672                 lnb->lnb_guard_disk = 0;
673                 lnb->lnb_locked = 0;
674
675                 LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
676                          (long long) len);
677                 offset += plen;
678                 len -= plen;
679                 lnb++;
680                 (*nrpages)++;
681         }
682
683         RETURN(rc);
684 }
685
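/*
 * For illustration, with 4KB pages a request at offset = 5100 with
 * len = 10000 is split into three niobufs: {offset 5100, page offset 1004,
 * len 3092}, {8192, 0, 4096} and {12288, 0, 2812}, and *nrpages is set to 3.
 */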
686 static struct page *osd_get_page(const struct lu_env *env, struct dt_object *dt,
687                                  loff_t offset, gfp_t gfp_mask, bool cache)
688 {
689         struct osd_thread_info *oti = osd_oti_get(env);
690         struct inode *inode = osd_dt_obj(dt)->oo_inode;
691         struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
692         struct page *page;
693         int cur;
694
695         LASSERT(inode);
696
697         if (cache) {
698                 page = find_or_create_page(inode->i_mapping,
699                                            offset >> PAGE_SHIFT, gfp_mask);
700
701                 if (likely(page)) {
702                         LASSERT(!PagePrivate2(page));
703                         wait_on_page_writeback(page);
704                 } else {
705                         lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
706                 }
707
708                 return page;
709         }
710
711         if (inode->i_mapping->nrpages) {
712                 /* consult with pagecache, but do not create new pages */
713                 /* this is normally used once */
714                 page = find_lock_page(inode->i_mapping, offset >> PAGE_SHIFT);
715                 if (page) {
716                         wait_on_page_writeback(page);
717                         return page;
718                 }
719         }
720
721         LASSERT(oti->oti_dio_pages);
722         cur = oti->oti_dio_pages_used;
723         page = oti->oti_dio_pages[cur];
724
725         if (unlikely(!page)) {
726                 LASSERT(cur < PTLRPC_MAX_BRW_PAGES);
727                 page = alloc_page(gfp_mask);
728                 if (!page)
729                         return NULL;
730                 oti->oti_dio_pages[cur] = page;
731                 SetPagePrivate2(page);
732                 lock_page(page);
733         }
734
735         ClearPageUptodate(page);
736         page->index = offset >> PAGE_SHIFT;
737         oti->oti_dio_pages_used++;
738
739         return page;
740 }
741
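/*
 * Pages returned by osd_get_page() come from one of three places: the
 * inode's page cache (created on demand when caching is enabled), an
 * already-cached page found without creating new ones, or the per-thread
 * pool of private pages marked with PagePrivate2 that bypass the page cache
 * entirely.  osd_bufs_put() relies on that marker to tell which pages go
 * back to the pool rather than to the VM.
 */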
742 /*
743  * there are following "locks":
744  * journal_start
745  * i_mutex
746  * page lock
747  *
748  * osd write path:
749  *  - lock page(s)
750  *  - journal_start
751  *  - truncate_sem
752  *
753  * ext4 vmtruncate:
754  *  - lock pages, unlock
755  *  - journal_start
756  *  - lock partial page
757  *  - i_data_sem
758  *
759  */
760
761 /**
762  * Unlock and release pages loaded by osd_bufs_get()
763  *
764  * Unlock \a npages pages from \a lnb and drop the refcount on them.
765  *
766  * \param env           thread execution environment
767  * \param dt            dt object undergoing IO (OSD object + methods)
768  * \param lnb           array of pages undergoing IO
769  * \param npages        number of pages in \a lnb
770  *
771  * \retval 0            always
772  */
773 static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
774                         struct niobuf_local *lnb, int npages)
775 {
776         struct osd_thread_info *oti = osd_oti_get(env);
777         struct pagevec pvec;
778         int i;
779
780         ll_pagevec_init(&pvec, 0);
781
782         for (i = 0; i < npages; i++) {
783                 struct page *page = lnb[i].lnb_page;
784
785                 if (page == NULL)
786                         continue;
787
788                 /* if the page isn't cached, then reset uptodate
789                  * to prevent reuse
790                  */
791                 if (PagePrivate2(page)) {
792                         oti->oti_dio_pages_used--;
793                 } else {
794                         if (lnb[i].lnb_locked)
795                                 unlock_page(page);
796                         if (pagevec_add(&pvec, page) == 0)
797                                 pagevec_release(&pvec);
798                 }
799
800                 lnb[i].lnb_page = NULL;
801         }
802
803         LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);
804
805         /* Release any partial pagevec */
806         pagevec_release(&pvec);
807
808         RETURN(0);
809 }
810
811 /**
812  * Load and lock pages undergoing IO
813  *
814  * Pages as described in the \a lnb array are fetched (from disk or cache)
815  * and locked for IO by the caller.
816  *
817  * DLM locking protects us from write and truncate competing for same region,
818  * but partial-page truncate can leave dirty pages in the cache for ldiskfs.
819  * It's possible the writeout on a such a page is in progress when we access
820  * it. It's also possible that during this writeout we put new (partial) data
821  * into the page, but won't be able to proceed in filter_commitrw_write().
822  * Therefore, just wait for writeout completion as it should be rare enough.
823  *
824  * \param env           thread execution environment
825  * \param dt            dt object undergoing IO (OSD object + methods)
826  * \param pos           byte offset of IO start
827  * \param len           number of bytes of IO
828  * \param lnb           array of extents undergoing IO
829  * \param maxlnb        max number of entries that fit in \a lnb
830  * \param rw            read or write operation, and other flags
831  *
832  * \retval pages        (zero or more) loaded successfully
833  * \retval -ENOMEM      on memory/page allocation error
834  */
835 static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
836                         loff_t pos, ssize_t len, struct niobuf_local *lnb,
837                         int maxlnb, enum dt_bufs_type rw)
838 {
839         struct osd_thread_info *oti = osd_oti_get(env);
840         struct osd_object *obj = osd_dt_obj(dt);
841         struct osd_device *osd   = osd_obj2dev(obj);
842         int npages, i, iosize, rc = 0;
843         bool cache, write;
844         loff_t fsize;
845         gfp_t gfp_mask;
846
847         LASSERT(obj->oo_inode);
848
849         rc = osd_map_remote_to_local(pos, len, &npages, lnb, maxlnb);
850         if (rc)
851                 RETURN(rc);
852
853         write = rw & DT_BUFS_TYPE_WRITE;
854
855         fsize = lnb[npages - 1].lnb_file_offset + lnb[npages - 1].lnb_len;
856         iosize = fsize - lnb[0].lnb_file_offset;
857         fsize = max(fsize, i_size_read(obj->oo_inode));
858
859         cache = rw & DT_BUFS_TYPE_READAHEAD;
860         if (cache)
861                 goto bypass_checks;
862
863         cache = osd_use_page_cache(osd);
864         while (cache) {
865                 if (write) {
866                         if (!osd->od_writethrough_cache) {
867                                 cache = false;
868                                 break;
869                         }
870                         if (iosize > osd->od_writethrough_max_iosize) {
871                                 cache = false;
872                                 break;
873                         }
874                 } else {
875                         if (!osd->od_read_cache) {
876                                 cache = false;
877                                 break;
878                         }
879                         if (iosize > osd->od_readcache_max_iosize) {
880                                 cache = false;
881                                 break;
882                         }
883                 }
884                 /* don't use cache on large files */
885                 if (osd->od_readcache_max_filesize &&
886                     fsize > osd->od_readcache_max_filesize)
887                         cache = false;
888                 break;
889         }
890
891 bypass_checks:
892         if (!cache && unlikely(!oti->oti_dio_pages)) {
893                 OBD_ALLOC_PTR_ARRAY_LARGE(oti->oti_dio_pages,
894                                           PTLRPC_MAX_BRW_PAGES);
895                 if (!oti->oti_dio_pages)
896                         return -ENOMEM;
897         }
898
899         /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
900         gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
901                                              GFP_HIGHUSER;
902         for (i = 0; i < npages; i++, lnb++) {
903                 lnb->lnb_page = osd_get_page(env, dt, lnb->lnb_file_offset,
904                                              gfp_mask, cache);
905                 if (lnb->lnb_page == NULL)
906                         GOTO(cleanup, rc = -ENOMEM);
907
908                 lnb->lnb_locked = 1;
909         }
910
911 #if 0
912         /* XXX: this version doesn't invalidate cached pages, but use them */
913         if (!cache && write && obj->oo_inode->i_mapping->nrpages) {
914                 /* do not allow data aliasing, invalidate pagecache */
915                 /* XXX: can be quite expensive in mixed case */
916                 invalidate_mapping_pages(obj->oo_inode->i_mapping,
917                                 lnb[0].lnb_file_offset >> PAGE_SHIFT,
918                                 lnb[npages - 1].lnb_file_offset >> PAGE_SHIFT);
919         }
920 #endif
921
922         RETURN(i);
923
924 cleanup:
925         if (i > 0)
926                 osd_bufs_put(env, dt, lnb - i, i);
927         return rc;
928 }
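
/*
 * Summary of the caching decision above: readahead requests always use the
 * page cache; otherwise the relevant cache must be enabled, reads must fit
 * within od_readcache_max_iosize and writes within od_writethrough_max_iosize,
 * and the file must not exceed od_readcache_max_filesize.  Any failed check
 * falls back to the private DIO pages handled by osd_get_page().
 */
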
929 /* Borrowed from ext4_chunk_trans_blocks() */
930 static int osd_chunk_trans_blocks(struct inode *inode, int nrblocks)
931 {
932         ldiskfs_group_t groups;
933         int gdpblocks;
934         int idxblocks;
935         int depth;
936         int ret;
937
938         depth = ext_depth(inode);
939         idxblocks = depth * 2;
940
941         /*
942          * Now let's see how many group bitmaps and group descriptor
943          * blocks we need to account for.
944          */
945         groups = idxblocks + 1;
946         gdpblocks = groups;
947         if (groups > LDISKFS_SB(inode->i_sb)->s_groups_count)
948                 groups = LDISKFS_SB(inode->i_sb)->s_groups_count;
949         if (gdpblocks > LDISKFS_SB(inode->i_sb)->s_gdb_count)
950                 gdpblocks = LDISKFS_SB(inode->i_sb)->s_gdb_count;
951
952         /* bitmaps and block group descriptor blocks */
953         ret = idxblocks + groups + gdpblocks;
954
955         /* Blocks for super block, inode, quota and xattr blocks */
956         ret += LDISKFS_META_TRANS_BLOCKS(inode->i_sb);
957
958         return ret;
959 }
960
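/*
 * For illustration: an extent-mapped inode of depth 2 gives idxblocks = 4,
 * so up to 5 block bitmaps and 5 group descriptor blocks are charged (both
 * capped by the real group and GDT counts), i.e. 4 + 5 + 5 = 14 blocks plus
 * LDISKFS_META_TRANS_BLOCKS() for the superblock, inode, quota and xattr
 * updates.
 */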
961 #ifdef HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS
962 static int osd_extend_restart_trans(handle_t *handle, int needed,
963                                     struct inode *inode)
964 {
965         int rc;
966
967         rc = ldiskfs_journal_ensure_credits(handle, needed,
968                 ldiskfs_trans_default_revoke_credits(inode->i_sb));
969         /* this means journal has been restarted */
970         if (rc > 0)
971                 rc = 0;
972
973         return rc;
974 }
975 #else
976 static int osd_extend_restart_trans(handle_t *handle, int needed,
977                                     struct inode *inode)
978 {
979         int rc;
980
981         if (ldiskfs_handle_has_enough_credits(handle, needed))
982                 return 0;
983         rc = ldiskfs_journal_extend(handle,
984                                 needed - handle->h_buffer_credits);
985         if (rc <= 0)
986                 return rc;
987
988         return ldiskfs_journal_restart(handle, needed);
989 }
990 #endif /* HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS */
991
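/*
 * Both variants above first try to stretch the running handle and only
 * restart the transaction when the journal cannot grant the extra credits;
 * with newer ldiskfs this is a single ldiskfs_journal_ensure_credits() call,
 * while the fallback open-codes the extend-then-restart sequence.
 */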
992 static int osd_ldiskfs_map_write(struct inode *inode, struct osd_iobuf *iobuf,
993                                  struct osd_device *osd, sector_t start_blocks,
994                                  sector_t count, loff_t *disk_size,
995                                  __u64 user_size)
996 {
997         /* if file has grown, take user_size into account */
998         if (user_size && *disk_size > user_size)
999                 *disk_size = user_size;
1000
1001         spin_lock(&inode->i_lock);
1002         if (*disk_size > i_size_read(inode)) {
1003                 i_size_write(inode, *disk_size);
1004                 LDISKFS_I(inode)->i_disksize = *disk_size;
1005                 spin_unlock(&inode->i_lock);
1006                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
1007         } else {
1008                 spin_unlock(&inode->i_lock);
1009         }
1010
1011         /*
1012          * We don't do stats here as in read path because
1013          * write is async: we'll do this in osd_put_bufs()
1014          */
1015         return osd_do_bio(osd, inode, iobuf, start_blocks, count);
1016 }
1017
1018 static unsigned int osd_extent_bytes(const struct osd_device *o)
1019 {
1020         unsigned int *extent_bytes_ptr =
1021                         raw_cpu_ptr(o->od_extent_bytes_percpu);
1022
1023         if (likely(*extent_bytes_ptr))
1024                 return *extent_bytes_ptr;
1025
1026         /* initialize on first access or CPU hotplug */
1027         if (!ldiskfs_has_feature_extents(osd_sb(o)))
1028                 *extent_bytes_ptr = 1 << osd_sb(o)->s_blocksize_bits;
1029         else
1030                 *extent_bytes_ptr = OSD_DEFAULT_EXTENT_BYTES;
1031
1032         return *extent_bytes_ptr;
1033 }
1034
1035 #define EXTENT_BYTES_DECAY 64
1036 static void osd_decay_extent_bytes(struct osd_device *osd,
1037                                    unsigned int new_bytes)
1038 {
1039         unsigned int old_bytes;
1040
1041         if (!ldiskfs_has_feature_extents(osd_sb(osd)))
1042                 return;
1043
1044         old_bytes = osd_extent_bytes(osd);
1045         *raw_cpu_ptr(osd->od_extent_bytes_percpu) =
1046                 (old_bytes * (EXTENT_BYTES_DECAY - 1) +
1047                  min(new_bytes, OSD_DEFAULT_EXTENT_BYTES) +
1048                  EXTENT_BYTES_DECAY - 1) / EXTENT_BYTES_DECAY;
1049 }
1050
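/*
 * For illustration of the decay: with EXTENT_BYTES_DECAY = 64, a current
 * average of 1MB and a new, fragmented allocation of only 64KB, the new
 * average is (1MB * 63 + 64KB + 63) / 64, roughly 1009KB, so a single bad
 * allocation nudges the per-CPU estimate rather than collapsing it.
 */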
1051 static int osd_ldiskfs_map_inode_pages(struct inode *inode,
1052                                        struct osd_iobuf *iobuf,
1053                                        struct osd_device *osd,
1054                                        int create, __u64 user_size,
1055                                        int check_credits,
1056                                        struct thandle *thandle)
1057 {
1058         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
1059         int rc = 0, i = 0, mapped_index = 0;
1060         struct page *fp = NULL;
1061         int clen = 0;
1062         pgoff_t max_page_index;
1063         handle_t *handle = NULL;
1064         sector_t start_blocks = 0, count = 0;
1065         loff_t disk_size = 0;
1066         struct page **page = iobuf->dr_pages;
1067         int pages = iobuf->dr_npages;
1068         sector_t *blocks = iobuf->dr_blocks;
1069         struct niobuf_local *lnb1, *lnb2;
1070         loff_t size1, size2;
1071
1072         max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
1073
1074         CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
1075                 inode->i_ino, pages, (*page)->index);
1076
1077         if (create) {
1078                 create = LDISKFS_GET_BLOCKS_CREATE;
1079                 handle = ldiskfs_journal_current_handle();
1080                 LASSERT(handle != NULL);
1081                 rc = osd_attach_jinode(inode);
1082                 if (rc)
1083                         return rc;
1084                 disk_size = i_size_read(inode);
1085                 /* if disk_size is already bigger than specified user_size,
1086                  * ignore user_size
1087                  */
1088                 if (disk_size > user_size)
1089                         user_size = 0;
1090         }
1091         /* pages are already sorted, so we just have to find
1092          * contiguous runs and process them properly
1093          */
1094         while (i < pages) {
1095                 long blen, total = 0, previous_total = 0;
1096                 struct ldiskfs_map_blocks map = { 0 };
1097
1098                 if (fp == NULL) { /* start new extent */
1099                         fp = *page++;
1100                         clen = 1;
1101                         if (++i != pages)
1102                                 continue;
1103                 } else if (fp->index + clen == (*page)->index) {
1104                         /* continue the extent */
1105                         page++;
1106                         clen++;
1107                         if (++i != pages)
1108                                 continue;
1109                 }
1110                 if (fp->index + clen >= max_page_index)
1111                         GOTO(cleanup, rc = -EFBIG);
1112                 /* process found extent */
1113                 map.m_lblk = fp->index * blocks_per_page;
1114                 map.m_len = blen = clen * blocks_per_page;
1115 cont_map:
1116                 /**
1117                  * We might restart the transaction for block allocations.
1118                  * To preserve data ordered mode, the IO submission, disk
1119                  * size update and block allocations must stay within the
1120                  * same transaction to guarantee consistency.
1121                  */
1122                 if (handle && check_credits) {
1123                         struct osd_thandle *oh;
1124
1125                         LASSERT(thandle != NULL);
1126                         oh = container_of(thandle, struct osd_thandle,
1127                                           ot_super);
1128                         /*
1129                          * only issue IO if a transaction restart is needed,
1130                          * since updating the disk size requires the inode
1131                          * lock, which we want to avoid as much as possible.
1132                          */
1133                         if (oh->oh_declared_ext <= 0) {
1134                                 rc = osd_ldiskfs_map_write(inode,
1135                                         iobuf, osd, start_blocks,
1136                                         count, &disk_size, user_size);
1137                                 if (rc)
1138                                         GOTO(cleanup, rc);
1139                                 thandle->th_restart_tran = 1;
1140                                 GOTO(cleanup, rc = -EAGAIN);
1141                         }
1142
1143                         if (OBD_FAIL_CHECK(OBD_FAIL_OST_RESTART_IO))
1144                                 oh->oh_declared_ext = 0;
1145                         else
1146                                 oh->oh_declared_ext--;
1147                 }
1148                 rc = ldiskfs_map_blocks(handle, inode, &map, create);
1149                 if (rc >= 0) {
1150                         int c = 0;
1151
1152                         for (; total < blen && c < map.m_len; c++, total++) {
1153                                 if (rc == 0) {
1154                                         *(blocks + total) = 0;
1155                                         total++;
1156                                         break;
1157                                 }
1158                                 if ((map.m_flags & LDISKFS_MAP_UNWRITTEN) &&
1159                                     !create) {
1160                                         /* don't try to read allocated but
1161                                          * unwritten blocks; instead fill those
1162                                          * regions with zeros in osd_do_bio() */
1163                                         *(blocks + total) = 0;
1164                                         continue;
1165                                 }
1166                                 *(blocks + total) = map.m_pblk + c;
1167                                 /* unmap any possible underlying
1168                                  * metadata from the block device
1169                                  * mapping.  b=6998.
1170                                  */
1171                                 if ((map.m_flags & LDISKFS_MAP_NEW) &&
1172                                     create)
1173                                         clean_bdev_aliases(inode->i_sb->s_bdev,
1174                                                            map.m_pblk + c, 1);
1175                         }
1176                         rc = 0;
1177                 }
1178
1179                 if (rc == 0 && create) {
1180                         count += (total - previous_total);
1181                         mapped_index = (count + blocks_per_page -
1182                                         1) / blocks_per_page - 1;
1183                         lnb1 = iobuf->dr_lnbs[i - clen];
1184                         lnb2 = iobuf->dr_lnbs[mapped_index];
1185                         size1 = lnb1->lnb_file_offset -
1186                                 (lnb1->lnb_file_offset % PAGE_SIZE) +
1187                                 (total << inode->i_blkbits);
1188                         size2 = lnb2->lnb_file_offset + lnb2->lnb_len;
1189
1190                         if (size1 > size2)
1191                                 size1 = size2;
1192                         if (size1 > disk_size)
1193                                 disk_size = size1;
1194                 }
1195
1196                 if (rc == 0 && total < blen) {
1197                         /*
1198                          * decay the average extent size if we could not
1199                          * allocate the whole extent in one go.
1200                          */
1201                         osd_decay_extent_bytes(osd,
1202                                 (total - previous_total) << inode->i_blkbits);
1203                         map.m_lblk = fp->index * blocks_per_page + total;
1204                         map.m_len = blen - total;
1205                         previous_total = total;
1206                         goto cont_map;
1207                 }
1208                 if (rc != 0)
1209                         GOTO(cleanup, rc);
1210                 /*
1211                  * also feed the decaying average when we managed to
1212                  * allocate a good large extent.
1213                  */
1214                 if (total - previous_total >=
1215                     osd_extent_bytes(osd) >> inode->i_blkbits)
1216                         osd_decay_extent_bytes(osd,
1217                                 (total - previous_total) << inode->i_blkbits);
1218                 /* look for next extent */
1219                 fp = NULL;
1220                 blocks += blocks_per_page * clen;
1221         }
1222 cleanup:
1223         if (rc == 0 && create &&
1224             start_blocks < pages * blocks_per_page) {
1225                 rc = osd_ldiskfs_map_write(inode, iobuf, osd, start_blocks,
1226                                            count, &disk_size, user_size);
1227                 LASSERT(start_blocks + count == pages * blocks_per_page);
1228         }
1229         return rc;
1230 }
1231
1232 static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
1233                           struct niobuf_local *lnb, int npages)
1234 {
1235         struct osd_thread_info *oti   = osd_oti_get(env);
1236         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
1237         struct inode           *inode = osd_dt_obj(dt)->oo_inode;
1238         struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
1239         ktime_t start, end;
1240         s64 timediff;
1241         ssize_t isize;
1242         __s64  maxidx;
1243         int i, rc = 0;
1244
1245         LASSERT(inode);
1246
1247         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1248         if (unlikely(rc != 0))
1249                 RETURN(rc);
1250
1251         isize = i_size_read(inode);
1252         maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;
1253
1254         start = ktime_get();
1255         for (i = 0; i < npages; i++) {
1256
1257                 /*
1258                  * until commit the content of the page is undefined;
1259                  * we'll set it uptodate once the bulk is done. Otherwise
1260                  * subsequent reads could access non-stable data.
1261                  */
1262                 ClearPageUptodate(lnb[i].lnb_page);
1263
1264                 if (lnb[i].lnb_len == PAGE_SIZE)
1265                         continue;
1266
1267                 if (maxidx >= lnb[i].lnb_page->index) {
1268                         osd_iobuf_add_page(iobuf, &lnb[i]);
1269                 } else {
1270                         long off;
1271                         char *p = kmap(lnb[i].lnb_page);
1272
1273                         off = lnb[i].lnb_page_offset;
1274                         if (off)
1275                                 memset(p, 0, off);
1276                         off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
1277                               ~PAGE_MASK;
1278                         if (off)
1279                                 memset(p + off, 0, PAGE_SIZE - off);
1280                         kunmap(lnb[i].lnb_page);
1281                 }
1282         }
1283         end = ktime_get();
1284         timediff = ktime_us_delta(end, start);
1285         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1286
1287         if (iobuf->dr_npages) {
1288                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
1289                                                  0, 0, NULL);
1290                 if (likely(rc == 0)) {
1291                         rc = osd_do_bio(osd, inode, iobuf, 0, 0);
1292                         /* do IO stats for preparation reads */
1293                         osd_fini_iobuf(osd, iobuf);
1294                 }
1295         }
1296         RETURN(rc);
1297 }
1298
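/*
 * Note that osd_write_prep() only queues reads for pages that are partially
 * covered by the incoming write and lie within the current file size;
 * whole-page writes need no pre-read, and partial pages beyond EOF are
 * simply zero-filled around the incoming data, avoiding needless
 * read-modify-write IO.
 */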
1299 struct osd_fextent {
1300         sector_t        start;
1301         sector_t        end;
1302         unsigned int    mapped:1;
1303 };
1304
1305 static int osd_is_mapped(struct dt_object *dt, __u64 offset,
1306                          struct osd_fextent *cached_extent)
1307 {
1308         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1309         sector_t block = offset >> inode->i_blkbits;
1310         sector_t start;
1311         struct fiemap_extent_info fei = { 0 };
1312         struct fiemap_extent fe = { 0 };
1313         int rc;
1314
1315         if (block >= cached_extent->start && block < cached_extent->end)
1316                 return cached_extent->mapped;
1317
1318         if (i_size_read(inode) == 0)
1319                 return 0;
1320
1321         /* Beyond EOF, must not be mapped */
1322         if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
1323                 return 0;
1324
1325         fei.fi_extents_max = 1;
1326         fei.fi_extents_start = &fe;
1327
1328         rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
1329         if (rc != 0)
1330                 return 0;
1331
1332         start = fe.fe_logical >> inode->i_blkbits;
1333         if (fei.fi_extents_mapped == 0) {
1334                 /* a special case - no extent found at this offset or beyond,
1335                  * so we can treat this as a hole up to EOF. It's safe to cache
1336                  * because other threads cannot allocate/punch blocks in the
1337                  * range this thread is working on (LDLM). */
1338                 cached_extent->start = block;
1339                 cached_extent->end = i_size_read(inode) >> inode->i_blkbits;
1340                 cached_extent->mapped = 0;
1341                 return 0;
1342         }
1343
1344         if (start > block) {
1345                 cached_extent->start = block;
1346                 cached_extent->end = start;
1347                 cached_extent->mapped = 0;
1348         } else {
1349                 cached_extent->start = start;
1350                 cached_extent->end = (fe.fe_logical + fe.fe_length) >>
1351                                       inode->i_blkbits;
1352                 cached_extent->mapped = 1;
1353         }
1354
1355         return cached_extent->mapped;
1356 }
1357
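/*
 * The single-extent FIEMAP lookup above is cached in struct osd_fextent so
 * that osd_declare_write_commit() can test many consecutive page offsets
 * against one mapped (or hole) range without issuing a fiemap call per page.
 */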
1358 #define MAX_EXTENTS_PER_WRITE 100
1359 static int osd_declare_write_commit(const struct lu_env *env,
1360                                     struct dt_object *dt,
1361                                     struct niobuf_local *lnb, int npages,
1362                                     struct thandle *handle)
1363 {
1364         const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1365         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1366         struct osd_thandle      *oh;
1367         int                     extents = 0;
1368         int                     depth;
1369         int                     i;
1370         int                     newblocks = 0;
1371         int                     rc = 0;
1372         int                     credits = 0;
1373         long long               quota_space = 0;
1374         struct osd_fextent      mapped = { 0 }, extent = { 0 };
1375         enum osd_quota_local_flags local_flags = 0;
1376         enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
1377         unsigned int            extent_bytes;
1378         ENTRY;
1379
1380         LASSERT(handle != NULL);
1381         oh = container_of(handle, struct osd_thandle, ot_super);
1382         LASSERT(oh->ot_handle == NULL);
1383
1384         /*
1385          * We track a decaying average extent size per filesystem.
1386          * Most of the time it will be 1MB; as the filesystem becomes
1387          * heavily fragmented it may drop to 4KB in the worst case.
1388          */
1389         extent_bytes = osd_extent_bytes(osd);
1390         LASSERT(extent_bytes >= (1 << osd_sb(osd)->s_blocksize_bits));
1391
1392         /* calculate number of extents (probably better to pass nb) */
1393         for (i = 0; i < npages; i++) {
1394                 /* Ignore quota for the whole request if any page is from
1395                  * the client cache or was written by root.
1396                  *
1397                  * XXX once we drop 1.8 client support, the check for
1398                  * whether a page is from the cache can be simplified to:
1399                  * !(lnb[i].flags & OBD_BRW_SYNC)
1400                  *
1401                  * XXX we could handle this on a per-lnb basis as done by
1402                  * grant.
1403                  */
1404                 if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
1405                     (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
1406                     OBD_BRW_FROM_GRANT)
1407                         declare_flags |= OSD_QID_FORCE;
1408
1409                 if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &mapped)) {
1410                         lnb[i].lnb_flags |= OBD_BRW_MAPPED;
1411                         continue;
1412                 }
1413
1414                 if (lnb[i].lnb_flags & OBD_BRW_DONE) {
1415                         lnb[i].lnb_flags |= OBD_BRW_MAPPED;
1416                         continue;
1417                 }
1418
1419                 /* count only unmapped changes */
1420                 newblocks++;
1421                 if (lnb[i].lnb_file_offset != extent.end || extent.end == 0) {
1422                         if (extent.end != 0)
1423                                 extents += (extent.end - extent.start +
1424                                             extent_bytes - 1) / extent_bytes;
1425                         extent.start = lnb[i].lnb_file_offset;
1426                         extent.end = lnb[i].lnb_file_offset + lnb[i].lnb_len;
1427                 } else {
1428                         extent.end += lnb[i].lnb_len;
1429                 }
1430
1431                 quota_space += PAGE_SIZE;
1432         }
1433
1434         credits++; /* inode */
1435         /*
1436          * overwrite case: no need to modify the tree
1437          * or allocate blocks.
1438          */
1439         if (!newblocks)
1440                 goto out_declare;
1441
1442         extents += (extent.end - extent.start +
1443                     extent_bytes - 1) / extent_bytes;
1444         /**
1445          * As filesystem space usage grows, mballoc no longer tries as hard
1446          * to scan block groups for the best-aligned free extent, so the
1447          * average bytes per extent can decay to a very small value, which
1448          * would make us reserve too many credits. We can be more
1449          * optimistic in the credit reservation: even when the filesystem
1450          * is nearly full, it is extremely unlikely that the worst case
1451          * would ever be hit.
1452          */
1453         if (extents > MAX_EXTENTS_PER_WRITE)
1454                 extents = MAX_EXTENTS_PER_WRITE;
1455
1456         /*
1457          * Each extent can go into a new leaf and cause a split.
1458          * 5 is the max tree depth: inode + 4 index blocks.
1459          * With block maps, the depth is 3 at most.
1460          */
1461         if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
1462                 /*
1463                  * Many concurrent threads may grow the tree by the time
1464                  * our transaction starts, so consider 2 the minimum depth.
1465                  */
1466                 depth = ext_depth(inode);
1467                 depth = max(depth, 1) + 1;
1468                 newblocks += depth;
1469                 credits += depth * 2 * extents;
1470         } else {
1471                 depth = 3;
1472                 newblocks += depth;
1473                 credits += depth * extents;
1474         }
1475
1476         oh->oh_declared_ext = extents;
1477
1478         /* quota space for metadata blocks */
1479         quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));
1480
1481         /* quota space should be reported in 1K blocks */
1482         quota_space = toqb(quota_space);
1483
1484         /* each new block can go into a different group (bitmap + gd) */
1485
1486         /* we can't dirty more bitmap blocks than exist */
1487         if (extents > LDISKFS_SB(osd_sb(osd))->s_groups_count)
1488                 credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
1489         else
1490                 credits += extents;
1491
1492         /* we can't dirty more gd blocks than exist */
1493         if (extents > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
1494                 credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
1495         else
1496                 credits += extents;
1497
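        /*
         * Illustrative example (not part of the original code; the numbers
         * are assumptions): with 4KB blocks/pages, extent_bytes at its 1MB
         * default and an extent tree of depth 1, a fully unmapped contiguous
         * 1MB write (256 pages) gives newblocks = 256, extents = 1 and
         * depth = max(1, 1) + 1 = 2, hence
         *   credits = 1 (inode) + 2 * 2 * 1 (tree) + 1 (bitmap) + 1 (gd) = 7
         *   quota_space = toqb(256 * 4KB + 2 * 1 * 4KB) = 1032 1KB blocks
         */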
1498         CDEBUG(D_INODE,
1499                "%s: inode #%lu extent_bytes %u extents %d credits %d\n",
1500                osd_ino2name(inode), inode->i_ino, extent_bytes, extents,
1501                credits);
1502
1503 out_declare:
1504         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1505
1506         /* make sure the over quota flags were not set */
1507         lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;
1508
1509         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1510                                    i_projid_read(inode), quota_space, oh,
1511                                    osd_dt_obj(dt), &local_flags, declare_flags);
1512
1513         /* For now we only need to store the overquota flags in the first
1514          * lnb; once we support multiple-object BRW, this code needs to be
1515          * revised.
1516          */
1517         if (local_flags & QUOTA_FL_OVER_USRQUOTA)
1518                 lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
1519         if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
1520                 lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
1521         if (local_flags & QUOTA_FL_OVER_PRJQUOTA)
1522                 lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;
1523
1524         if (rc == 0)
1525                 rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);
1526
1527         RETURN(rc);
1528 }
1529
1530 /* Commit a bulk write: map file blocks for the prepared pages */
1531 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
1532                             struct niobuf_local *lnb, int npages,
1533                             struct thandle *thandle, __u64 user_size)
1534 {
1535         struct osd_thread_info *oti = osd_oti_get(env);
1536         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1537         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1538         struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
1539         int rc = 0, i, check_credits = 0;
1540
1541         LASSERT(inode);
1542
1543         rc = osd_init_iobuf(osd, iobuf, 1, npages);
1544         if (unlikely(rc != 0))
1545                 RETURN(rc);
1546
1547         dquot_initialize(inode);
1548
1549         for (i = 0; i < npages; i++) {
1550                 if (lnb[i].lnb_rc == -ENOSPC &&
1551                     (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
1552                         /* Allow the write to proceed if overwriting an
1553                          * existing block
1554                          */
1555                         lnb[i].lnb_rc = 0;
1556                 }
1557
1558                 if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
1559                         CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
1560                                lnb[i].lnb_rc);
1561                         LASSERT(lnb[i].lnb_page);
1562                         generic_error_remove_page(inode->i_mapping,
1563                                                   lnb[i].lnb_page);
1564                         continue;
1565                 }
1566
1567                 if (lnb[i].lnb_flags & OBD_BRW_DONE)
1568                         continue;
1569
1570                 if (!(lnb[i].lnb_flags & OBD_BRW_MAPPED))
1571                         check_credits = 1;
1572
1573                 LASSERT(PageLocked(lnb[i].lnb_page));
1574                 LASSERT(!PageWriteback(lnb[i].lnb_page));
1575
1576                 /*
1577                  * Since write and truncate are serialized by oo_sem, even
1578                  * partial-page truncate should not leave dirty pages in the
1579                  * page cache.
1580                  */
1581                 LASSERT(!PageDirty(lnb[i].lnb_page));
1582
1583                 SetPageUptodate(lnb[i].lnb_page);
1584
1585                 osd_iobuf_add_page(iobuf, &lnb[i]);
1586         }
1587
1588         osd_trans_exec_op(env, thandle, OSD_OT_WRITE);
1589
1590         if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
1591                 rc = -ENOSPC;
1592         } else if (iobuf->dr_npages > 0) {
1593                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd,
1594                                                  1, user_size,
1595                                                  check_credits,
1596                                                  thandle);
1597         } else {
1598                 /* no pages to write, no transno is needed */
1599                 thandle->th_local = 1;
1600         }
1601
1602         if (rc != 0 && !thandle->th_restart_tran)
1603                 osd_fini_iobuf(osd, iobuf);
1604
1605         osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
1606
1607         if (unlikely(rc != 0 && !thandle->th_restart_tran)) {
1608                 /* if write fails, we should drop pages from the cache */
1609                 for (i = 0; i < npages; i++) {
1610                         if (lnb[i].lnb_page == NULL)
1611                                 continue;
1612                         if (!PagePrivate2(lnb[i].lnb_page)) {
1613                                 LASSERT(PageLocked(lnb[i].lnb_page));
1614                                 generic_error_remove_page(inode->i_mapping,
1615                                                           lnb[i].lnb_page);
1616                         }
1617                 }
1618         }
1619
1620         RETURN(rc);
1621 }
1622
1623 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1624                          struct niobuf_local *lnb, int npages)
1625 {
1626         struct osd_thread_info *oti = osd_oti_get(env);
1627         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1628         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1629         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1630         int rc = 0, i, cache_hits = 0, cache_misses = 0;
1631         ktime_t start, end;
1632         s64 timediff;
1633         loff_t isize;
1634
1635         LASSERT(inode);
1636
1637         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1638         if (unlikely(rc != 0))
1639                 RETURN(rc);
1640
1641         isize = i_size_read(inode);
1642
1643         start = ktime_get();
1644         for (i = 0; i < npages; i++) {
1645
1646                 if (isize <= lnb[i].lnb_file_offset)
1647                         /* If there's no more data, abort early.
1648                          * lnb->lnb_rc == 0, so it's easy to detect later.
1649                          */
1650                         break;
1651
1652                 /* instead of checking whether we go beyond isize, send
1653                  * complete pages all the time
1654                  */
1655                 lnb[i].lnb_rc = lnb[i].lnb_len;
1656
1657                 /* Bypass disk read if fail_loc is set properly */
1658                 if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
1659                         SetPageUptodate(lnb[i].lnb_page);
1660
1661                 if (PageUptodate(lnb[i].lnb_page)) {
1662                         cache_hits++;
1663                         unlock_page(lnb[i].lnb_page);
1664                 } else {
1665                         cache_misses++;
1666                         osd_iobuf_add_page(iobuf, &lnb[i]);
1667                 }
1668                 /* No need to unlock in osd_bufs_put(): the sooner a page is
1669                  * unlocked, the earlier another client can access it.
1670                  * Note that the real unlock_page() may be called a few
1671                  * lines below, after osd_do_bio(). lnb is per-thread, so it
1672                  * is fine to have PG_locked and lnb_locked inconsistent here
1673                  */
1674                 lnb[i].lnb_locked = 0;
1675         }
1676         end = ktime_get();
1677         timediff = ktime_us_delta(end, start);
1678         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1679
1680         if (cache_hits != 0)
1681                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1682                                     cache_hits);
1683         if (cache_misses != 0)
1684                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1685                                     cache_misses);
1686         if (cache_hits + cache_misses != 0)
1687                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1688                                     cache_hits + cache_misses);
1689
1690         if (iobuf->dr_npages) {
1691                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
1692                                                  0, 0, NULL);
1693                 if (!rc)
1694                         rc = osd_do_bio(osd, inode, iobuf, 0, 0);
1695
1696                 /* IO stats will be done in osd_bufs_put() */
1697
1698                 /* early release to let others read data during the bulk */
1699                 for (i = 0; i < iobuf->dr_npages; i++) {
1700                         LASSERT(PageLocked(iobuf->dr_pages[i]));
1701                         if (!PagePrivate2(iobuf->dr_pages[i]))
1702                                 unlock_page(iobuf->dr_pages[i]);
1703                 }
1704         }
1705
1706         RETURN(rc);
1707 }
1708
1709 /*
1710  * XXX: Another layering violation for now.
1711  *
1712  * We don't want to use ->f_op->read/write methods, because generic file write
1713  *
1714  *         - serializes on ->i_sem, and
1715  *
1716  *         - does a lot of extra work like balance_dirty_pages(),
1717  *
1718  * which doesn't work for globally shared files like /last_rcvd.
1719  */
1720 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1721 {
1722         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1723
1724         memcpy(buffer, (char *)ei->i_data, buflen);
1725
1726         return  buflen;
1727 }
1728
1729 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1730 {
1731         struct buffer_head *bh;
1732         unsigned long block;
1733         int osize;
1734         int blocksize;
1735         int csize;
1736         int boffs;
1737
1738         /* prevent reading after eof */
1739         spin_lock(&inode->i_lock);
1740         if (i_size_read(inode) < *offs + size) {
1741                 loff_t diff = i_size_read(inode) - *offs;
1742
1743                 spin_unlock(&inode->i_lock);
1744                 if (diff < 0) {
1745                         CDEBUG(D_OTHER,
1746                                "size %llu is too short to read @%llu\n",
1747                                i_size_read(inode), *offs);
1748                         return -EBADR;
1749                 } else if (diff == 0) {
1750                         return 0;
1751                 } else {
1752                         size = diff;
1753                 }
1754         } else {
1755                 spin_unlock(&inode->i_lock);
1756         }
1757
1758         blocksize = 1 << inode->i_blkbits;
1759         osize = size;
1760         while (size > 0) {
1761                 block = *offs >> inode->i_blkbits;
1762                 boffs = *offs & (blocksize - 1);
1763                 csize = min(blocksize - boffs, size);
1764                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1765                 if (IS_ERR(bh)) {
1766                         CERROR("%s: can't read %u@%llu on ino %lu: rc = %ld\n",
1767                                osd_ino2name(inode), csize, *offs, inode->i_ino,
1768                                PTR_ERR(bh));
1769                         return PTR_ERR(bh);
1770                 }
1771
1772                 if (bh != NULL) {
1773                         memcpy(buf, bh->b_data + boffs, csize);
1774                         brelse(bh);
1775                 } else {
1776                         memset(buf, 0, csize);
1777                 }
1778
1779                 *offs += csize;
1780                 buf += csize;
1781                 size -= csize;
1782         }
1783         return osize;
1784 }
1785
1786 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1787                         struct lu_buf *buf, loff_t *pos)
1788 {
1789         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1790         int rc;
1791
1792         /* Read small symlink from inode body as we need to maintain correct
1793          * on-disk symlinks for ldiskfs.
1794          */
1795         if (S_ISLNK(dt->do_lu.lo_header->loh_attr)) {
1796                 loff_t size = i_size_read(inode);
1797
1798                 if (buf->lb_len < size)
1799                         return -EOVERFLOW;
1800
1801                 if (size < sizeof(LDISKFS_I(inode)->i_data))
1802                         rc = osd_ldiskfs_readlink(inode, buf->lb_buf, size);
1803                 else
1804                         rc = osd_ldiskfs_read(inode, buf->lb_buf, size, pos);
1805         } else {
1806                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1807         }
1808
1809         return rc;
1810 }
1811
1812 static inline int osd_extents_enabled(struct super_block *sb,
1813                                       struct inode *inode)
1814 {
1815         if (inode != NULL) {
1816                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1817                         return 1;
1818         } else if (ldiskfs_has_feature_extents(sb)) {
1819                 return 1;
1820         }
1821         return 0;
1822 }
1823
1824 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1825                            const loff_t size, const loff_t pos,
1826                            const int blocks)
1827 {
1828         int credits, bits, bs, i;
1829
1830         bits = sb->s_blocksize_bits;
1831         bs = 1 << bits;
1832
1833         /* legacy block map: 3 levels * 3 (bitmap, gd, the block itself);
1834          * we do not expect block maps on large files,
1835          * so let's shrink it to 2 levels (4GB files)
1836          */
1837
1838         /* this is default reservation: 2 levels */
1839         credits = (blocks + 2) * 3;
1840
1841         /* actual offset is unknown, hard to optimize */
1842         if (pos == -1)
1843                 return credits;
1844
1845         /* now check for a few specific cases to optimize */
1846         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1847                 /* no indirects */
1848                 credits = blocks;
1849                 /* allocate if not allocated */
1850                 if (inode == NULL) {
1851                         credits += blocks * 2;
1852                         return credits;
1853                 }
1854                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1855                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1856                         if (LDISKFS_I(inode)->i_data[i] == 0)
1857                                 credits += 2;
1858                 }
1859         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1860                 /* single indirect */
1861                 credits = blocks * 3;
1862                 if (inode == NULL ||
1863                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1864                         credits += 3;
1865                 else
1866                         /* The indirect block may be modified. */
1867                         credits += 1;
1868         }
1869
1870         return credits;
1871 }
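/*
 * Worked examples for osd_calc_bkmap_credits() (illustrative assumptions,
 * not taken from the source): with 4KB blocks, writing 2 blocks at an
 * unknown offset (pos == -1) reserves the default (2 + 2) * 3 = 12 credits,
 * while writing the same 2 blocks at pos = 0 into an existing inode whose
 * first two i_data[] slots are still unallocated reserves 2 + 2 * 2 = 6.
 */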
1872
1873 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1874                                  const struct lu_buf *buf, loff_t _pos,
1875                                  struct thandle *handle)
1876 {
1877         struct osd_object  *obj  = osd_dt_obj(dt);
1878         struct inode       *inode = obj->oo_inode;
1879         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1880         struct osd_thandle *oh;
1881         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1882         int                 bits, bs;
1883         int                 depth, size;
1884         loff_t              pos;
1885         ENTRY;
1886
1887         LASSERT(buf != NULL);
1888         LASSERT(handle != NULL);
1889
1890         oh = container_of(handle, struct osd_thandle, ot_super);
1891         LASSERT(oh->ot_handle == NULL);
1892
1893         size = buf->lb_len;
1894         bits = sb->s_blocksize_bits;
1895         bs = 1 << bits;
1896
1897         if (_pos == -1) {
1898                 /* if this is an append, then we
1899                  * should expect a cross-block record
1900                  */
1901                 pos = 0;
1902         } else {
1903                 pos = _pos;
1904         }
1905
1906         /* blocks to modify */
1907         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1908         LASSERT(blocks > 0);
1909
1910         if (inode != NULL && _pos != -1) {
1911                 /* object size in blocks */
1912                 est = (i_size_read(inode) + bs - 1) >> bits;
1913                 allocated = inode->i_blocks >> (bits - 9);
1914                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1915                         /* looks like an overwrite, no need to modify tree */
1916                         credits = blocks;
1917                         /* no need to modify i_size */
1918                         goto out;
1919                 }
1920         }
1921
1922         if (osd_extents_enabled(sb, inode)) {
1923                 /*
1924                  * Many concurrent threads may grow the tree by the time
1925                  * our transaction starts, so consider 2 the minimum depth.
1926                  * For every level we may need to allocate a new block and
1927                  * take some entries from the old one: 3 blocks to allocate
1928                  * (bitmap, gd, the block itself) + the old block, i.e. 4
1929                  * per level.
1930                  */
1931                 depth = inode != NULL ? ext_depth(inode) : 0;
1932                 depth = max(depth, 1) + 1;
1933                 credits = depth;
1934                 /* if not an append, then a split may need to modify
1935                  * existing blocks, moving entries into the new ones
1936                  */
1937                 if (_pos != -1)
1938                         credits += depth;
1939                 /* blocks to store data: bitmap,gd,itself */
1940                 credits += blocks * 3;
1941         } else {
1942                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1943         }
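        /*
         * Illustrative example for the extent-based path above (assumed
         * numbers): a block-aligned 8KB record (2 blocks) written at a known
         * offset to an inode of extent depth 1 gives depth = 2 and
         * credits = 2 (tree growth) + 2 (split of existing blocks) +
         * 2 * 3 (bitmap, gd, data block per block) = 10; one more credit is
         * added below for the inode itself.
         */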
1944         /* if the inode is created as part of the transaction,
1945          * then it's already counted by the creation method
1946          */
1947         if (inode != NULL)
1948                 credits++;
1949
1950 out:
1951
1952         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1953
1954         /* dt_declare_write() is usually called for system objects, such
1955          * as llog or last_rcvd files. We needn't enforce quota on those
1956          * objects, so always set lqi_space to 0.
1957          */
1958         if (inode != NULL)
1959                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1960                                            i_gid_read(inode),
1961                                            i_projid_read(inode), 0,
1962                                            oh, obj, NULL, OSD_QID_BLK);
1963
1964         if (rc == 0)
1965                 rc = osd_trunc_lock(obj, oh, true);
1966
1967         RETURN(rc);
1968 }
1969
1970 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1971 {
1972         /* LU-2634: clear the extent format for fast symlink */
1973         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1974
1975         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1976         spin_lock(&inode->i_lock);
1977         LDISKFS_I(inode)->i_disksize = buflen;
1978         i_size_write(inode, buflen);
1979         spin_unlock(&inode->i_lock);
1980         osd_dirty_inode(inode, I_DIRTY_DATASYNC);
1981
1982         return 0;
1983 }
1984
1985 static int osd_ldiskfs_write_record(struct dt_object *dt, void *buf,
1986                                     int bufsize, int write_NUL, loff_t *offs,
1987                                     handle_t *handle)
1988 {
1989         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1990         struct buffer_head *bh        = NULL;
1991         loff_t              offset    = *offs;
1992         loff_t              new_size  = i_size_read(inode);
1993         unsigned long       block;
1994         int                 blocksize = 1 << inode->i_blkbits;
1995         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1996         int                 err = 0;
1997         int                 size;
1998         int                 boffs;
1999         int                 dirty_inode = 0;
2000         bool create, sparse, sync = false;
2001
2002         if (write_NUL) {
2003                 /*
2004                  * A long symlink write does not count the NUL terminator in
2005                  * bufsize; we write it out, but the inode's file size does
2006                  * not count the NUL terminator either.
2007                  */
2008                 ((char *)buf)[bufsize] = '\0';
2009                 ++bufsize;
2010         }
2011
2012         /* only the first flag-set matters */
2013         dirty_inode = !test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA,
2014                                        &ei->i_flags);
2015
2016         /* sparse checking is racy, but sparse files are a rare case; leave as is */
2017         sparse = (new_size > 0 && (inode->i_blocks >> (inode->i_blkbits - 9)) <
2018                   ((new_size - 1) >> inode->i_blkbits) + 1);
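        /* Background for the check above: inode->i_blocks is counted in
         * 512-byte sectors, so i_blocks >> (i_blkbits - 9) is the number of
         * filesystem blocks actually allocated; if that is smaller than the
         * block count implied by i_size, the file must contain holes.
         */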
2019
2020         while (bufsize > 0) {
2021                 int credits = handle->h_buffer_credits;
2022                 unsigned long last_block = (new_size == 0) ? 0 :
2023                                            (new_size - 1) >> inode->i_blkbits;
2024
2025                 if (bh)
2026                         brelse(bh);
2027
2028                 block = offset >> inode->i_blkbits;
2029                 boffs = offset & (blocksize - 1);
2030                 size = min(blocksize - boffs, bufsize);
2031                 sync = (block > last_block || new_size == 0 || sparse);
2032
2033                 if (sync)
2034                         down(&ei->i_append_sem);
2035
2036                 bh = __ldiskfs_bread(handle, inode, block, 0);
2037
2038                 if (unlikely(IS_ERR_OR_NULL(bh) && !sync))
2039                         CWARN(
2040                               "%s: adding bh without locking off %llu (block %lu, size %d, offs %llu)\n",
2041                               osd_ino2name(inode),
2042                               offset, block, bufsize, *offs);
2043
2044                 if (IS_ERR_OR_NULL(bh)) {
2045                         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2046                         int flags = LDISKFS_GET_BLOCKS_CREATE;
2047
2048                         /* while the filesystem is being mounted, avoid
2049                          * preallocation, otherwise the mount can take a
2050                          * long time as the mballoc cache is cold.
2051                          * XXX: this is a workaround until we have a proper
2052                          *      fix in mballoc
2053                          * XXX: works with extent-based files only */
2054                         if (!osd->od_cl_seq)
2055                                 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
2056                         bh = __ldiskfs_bread(handle, inode, block, flags);
2057                         create = true;
2058                 } else {
2059                         if (sync) {
2060                                 up(&ei->i_append_sem);
2061                                 sync = false;
2062                         }
2063                         create = false;
2064                 }
2065                 if (IS_ERR_OR_NULL(bh)) {
2066                         if (bh == NULL) {
2067                                 err = -EIO;
2068                         } else {
2069                                 err = PTR_ERR(bh);
2070                                 bh = NULL;
2071                         }
2072
2073                         CERROR(
2074                                "%s: error reading offset %llu (block %lu, size %d, offs %llu), credits %d/%d: rc = %d\n",
2075                                osd_ino2name(inode), offset, block, bufsize,
2076                                *offs, credits, handle->h_buffer_credits, err);
2077                         break;
2078                 }
2079
2080                 err = ldiskfs_journal_get_write_access(handle, bh);
2081                 if (err) {
2082                         CERROR("journal_get_write_access() returned error %d\n",
2083                                err);
2084                         break;
2085                 }
2086                 LASSERTF(boffs + size <= bh->b_size,
2087                          "boffs %d size %d bh->b_size %lu\n",
2088                          boffs, size, (unsigned long)bh->b_size);
2089                 if (create) {
2090                         memset(bh->b_data, 0, bh->b_size);
2091                         if (sync) {
2092                                 up(&ei->i_append_sem);
2093                                 sync = false;
2094                         }
2095                 }
2096                 memcpy(bh->b_data + boffs, buf, size);
2097                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2098                 if (err)
2099                         break;
2100
2101                 if (offset + size > new_size)
2102                         new_size = offset + size;
2103                 offset += size;
2104                 bufsize -= size;
2105                 buf += size;
2106         }
2107         if (sync)
2108                 up(&ei->i_append_sem);
2109
2110         if (bh)
2111                 brelse(bh);
2112
2113         if (write_NUL)
2114                 --new_size;
2115         /* correct in-core and on-disk sizes */
2116         if (new_size > i_size_read(inode)) {
2117                 spin_lock(&inode->i_lock);
2118                 if (new_size > i_size_read(inode))
2119                         i_size_write(inode, new_size);
2120                 if (i_size_read(inode) > ei->i_disksize) {
2121                         ei->i_disksize = i_size_read(inode);
2122                         dirty_inode = 1;
2123                 }
2124                 spin_unlock(&inode->i_lock);
2125         }
2126         if (dirty_inode)
2127                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
2128
2129         if (err == 0)
2130                 *offs = offset;
2131         return err;
2132 }
2133
2134 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
2135                          const struct lu_buf *buf, loff_t *pos,
2136                          struct thandle *handle)
2137 {
2138         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
2139         struct osd_thandle      *oh;
2140         ssize_t                 result;
2141         int                     is_link;
2142
2143         LASSERT(dt_object_exists(dt));
2144
2145         LASSERT(handle != NULL);
2146         LASSERT(inode != NULL);
2147         dquot_initialize(inode);
2148
2149         /* XXX: don't check: one declared chunk can be used many times */
2150         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
2151
2152         oh = container_of(handle, struct osd_thandle, ot_super);
2153         LASSERT(oh->ot_handle->h_transaction != NULL);
2154         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
2155
2156         /* Write small symlinks to the inode body as we need to maintain
2157          * correct on-disk symlinks for ldiskfs.
2158          * Note: buf->lb_buf contains a NUL terminator while buf->lb_len
2159          * does not include it.
2160          */
2161         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
2162         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
2163                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
2164         else
2165                 result = osd_ldiskfs_write_record(dt, buf->lb_buf, buf->lb_len,
2166                                                   is_link, pos, oh->ot_handle);
2167         if (result == 0)
2168                 result = buf->lb_len;
2169
2170         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
2171
2172         return result;
2173 }
2174
2175 static int osd_declare_fallocate(const struct lu_env *env,
2176                                  struct dt_object *dt, __u64 start, __u64 end,
2177                                  int mode, struct thandle *th)
2178 {
2179         struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
2180         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2181         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2182         long long quota_space = 0;
2183         /* 5 is max tree depth. (inode + 4 index blocks) */
2184         int depth = 5;
2185         int rc;
2186
2187         ENTRY;
2188
2189         /*
2190          * mode == 0 (standard preallocation) and PUNCH are supported;
2191          * the rest of the mode options are not supported yet.
2192          */
2193         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2194                 RETURN(-EOPNOTSUPP);
2195
2196         /* disable fallocate completely */
2197         if (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks < 0)
2198                 RETURN(-EOPNOTSUPP);
2199
2200         LASSERT(th);
2201         LASSERT(inode);
2202
2203         if (mode & FALLOC_FL_PUNCH_HOLE) {
2204                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
2205                                            i_gid_read(inode),
2206                                            i_projid_read(inode), 0, oh,
2207                                            osd_dt_obj(dt), NULL, OSD_QID_BLK);
2208                 if (rc == 0)
2209                         rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
2210                 RETURN(rc);
2211         }
2212
2213         /* quota space for metadata blocks;
2214          * an approximate metadata estimate should be good enough.
2215          */
2216         quota_space += PAGE_SIZE;
2217         quota_space += depth * LDISKFS_BLOCK_SIZE(osd_sb(osd));
2218
2219         /* quota space should be reported in 1K blocks */
2220         quota_space = toqb(quota_space) + toqb(end - start) +
2221                       LDISKFS_META_TRANS_BLOCKS(inode->i_sb);
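        /*
         * Illustrative example (assumed numbers): with 4KB blocks and pages,
         * preallocating a 1MB range yields toqb(4KB + 5 * 4KB) = 24 plus
         * toqb(1MB) = 1024 plus LDISKFS_META_TRANS_BLOCKS(sb) 1KB blocks of
         * quota space.
         */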
2222
2223         /* We don't need to reserve credits for the whole fallocate here;
2224          * we reserve space only for metadata. Fallocate credits are
2225          * extended as required.
2226          */
2227         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2228                                    i_projid_read(inode), quota_space, oh,
2229                                    osd_dt_obj(dt), NULL, OSD_QID_BLK);
2230         RETURN(rc);
2231 }
2232
2233 static int osd_fallocate_preallocate(const struct lu_env *env,
2234                                      struct dt_object *dt,
2235                                      __u64 start, __u64 end, int mode,
2236                                      struct thandle *th)
2237 {
2238         struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
2239         handle_t *handle = ldiskfs_journal_current_handle();
2240         unsigned int save_credits = oh->ot_credits;
2241         struct osd_object *obj = osd_dt_obj(dt);
2242         struct inode *inode = obj->oo_inode;
2243         struct ldiskfs_map_blocks map;
2244         unsigned int credits;
2245         ldiskfs_lblk_t blen;
2246         ldiskfs_lblk_t boff;
2247         loff_t new_size = 0;
2248         int depth = 0;
2249         int flags;
2250         int rc = 0;
2251
2252         ENTRY;
2253
2254         LASSERT(dt_object_exists(dt));
2255         LASSERT(osd_invariant(obj));
2256         LASSERT(inode != NULL);
2257
2258         CDEBUG(D_INODE, "fallocate: inode #%lu: start %llu end %llu mode %d\n",
2259                inode->i_ino, start, end, mode);
2260
2261         dquot_initialize(inode);
2262
2263         LASSERT(th);
2264
2265         boff = start >> inode->i_blkbits;
2266         blen = (ALIGN(end, 1 << inode->i_blkbits) >> inode->i_blkbits) - boff;
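        /* For illustration (assuming a 4KB block size): start = 6144 and
         * end = 131072 give boff = 1 and blen = 32 - 1 = 31, i.e. the
         * preallocation covers blocks 1..31 inclusive.
         */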
2267
2268         /* Create and mark new extents as either zero or unwritten */
2269         flags = osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks ?
2270                 LDISKFS_GET_BLOCKS_CREATE_ZERO :
2271                 LDISKFS_GET_BLOCKS_CREATE_UNWRIT_EXT;
2272         if (mode & FALLOC_FL_KEEP_SIZE)
2273                 flags |= LDISKFS_GET_BLOCKS_KEEP_SIZE;
2274
2275         inode_lock(inode);
2276
2277         /*
2278          * We only support preallocation for extent-based files.
2279          */
2280         if (!(ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)))
2281                 GOTO(out, rc = -EOPNOTSUPP);
2282
2283         if (!(mode & FALLOC_FL_KEEP_SIZE) && (end > i_size_read(inode) ||
2284             end > LDISKFS_I(inode)->i_disksize)) {
2285                 new_size = end;
2286                 rc = inode_newsize_ok(inode, new_size);
2287                 if (rc)
2288                         GOTO(out, rc);
2289         }
2290
2291         inode_dio_wait(inode);
2292
2293         map.m_lblk = boff;
2294         map.m_len = blen;
2295
2296         /* Don't normalize the request if it can fit in one extent so
2297          * that it doesn't get unnecessarily split into multiple extents.
2298          */
2299         if (blen <= EXT_UNWRITTEN_MAX_LEN)
2300                 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
2301
2302         /*
2303          * credits to insert 1 extent into extent tree.
2304          */
2305         credits = osd_chunk_trans_blocks(inode, blen);
2306         depth = ext_depth(inode);
2307
2308         while (rc >= 0 && blen) {
2309                 loff_t epos;
2310
2311                 /*
2312                  * Recalculate credits when extent tree depth changes.
2313                  */
2314                 if (depth != ext_depth(inode)) {
2315                         credits = osd_chunk_trans_blocks(inode, blen);
2316                         depth = ext_depth(inode);
2317                 }
2318
2319                 /* TODO: quota check */
2320                 rc = osd_extend_restart_trans(handle, credits, inode);
2321                 if (rc)
2322                         break;
2323
2324                 rc = ldiskfs_map_blocks(handle, inode, &map, flags);
2325                 if (rc <= 0) {
2326                         CDEBUG(D_INODE,
2327                                "inode #%lu: block %u: len %u: ldiskfs_map_blocks returned %d\n",
2328                                inode->i_ino, map.m_lblk, map.m_len, rc);
2329                         ldiskfs_mark_inode_dirty(handle, inode);
2330                         break;
2331                 }
2332
2333                 map.m_lblk += rc;
2334                 map.m_len = blen = blen - rc;
2335                 epos = (loff_t)map.m_lblk << inode->i_blkbits;
2336                 inode->i_ctime = current_time(inode);
2337                 if (new_size) {
2338                         if (epos > end)
2339                                 epos = end;
2340                         if (ldiskfs_update_inode_size(inode, epos) & 0x1)
2341                                 inode->i_mtime = inode->i_ctime;
2342                 } else {
2343                         if (epos > inode->i_size)
2344                                 ldiskfs_set_inode_flag(inode,
2345                                                        LDISKFS_INODE_EOFBLOCKS);
2346                 }
2347
2348                 ldiskfs_mark_inode_dirty(handle, inode);
2349         }
2350
2351 out:
2352         /* extend credits if needed for operations such as attribute set */
2353         if (rc >= 0)
2354                 rc = osd_extend_restart_trans(handle, save_credits, inode);
2355
2356         inode_unlock(inode);
2357
2358         RETURN(rc);
2359 }
2360
2361 static int osd_fallocate_punch(const struct lu_env *env, struct dt_object *dt,
2362                                __u64 start, __u64 end, int mode,
2363                                struct thandle *th)
2364 {
2365         struct osd_object *obj = osd_dt_obj(dt);
2366         struct inode *inode = obj->oo_inode;
2367         struct osd_access_lock *al;
2368         struct osd_thandle *oh;
2369         int rc = 0, found = 0;
2370
2371         ENTRY;
2372
2373         LASSERT(dt_object_exists(dt));
2374         LASSERT(osd_invariant(obj));
2375         LASSERT(inode != NULL);
2376
2377         dquot_initialize(inode);
2378
2379         LASSERT(th);
2380         oh = container_of(th, struct osd_thandle, ot_super);
2381         LASSERT(oh->ot_handle->h_transaction != NULL);
2382
2383         list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2384                 if (obj != al->tl_obj)
2385                         continue;
2386                 LASSERT(al->tl_shared == 0);
2387                 found = 1;
2388                 /* do actual punch in osd_trans_stop() */
2389                 al->tl_start = start;
2390                 al->tl_end = end;
2391                 al->tl_mode = mode;
2392                 al->tl_punch = true;
2393                 break;
2394         }
2395
2396         RETURN(rc);
2397 }
2398
2399 static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
2400                          __u64 start, __u64 end, int mode, struct thandle *th)
2401 {
2402         int rc;
2403
2404         ENTRY;
2405
2406         if (mode & FALLOC_FL_PUNCH_HOLE) {
2407                 /* punch */
2408                 rc = osd_fallocate_punch(env, dt, start, end, mode, th);
2409         } else {
2410                 /* standard preallocate */
2411                 rc = osd_fallocate_preallocate(env, dt, start, end, mode, th);
2412         }
2413         RETURN(rc);
2414 }
2415
2416 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
2417                              __u64 start, __u64 end, struct thandle *th)
2418 {
2419         struct osd_thandle *oh;
2420         struct inode       *inode;
2421         int                 rc;
2422         ENTRY;
2423
2424         LASSERT(th);
2425         oh = container_of(th, struct osd_thandle, ot_super);
2426
2427         /*
2428          * We don't need to reserve credits for the whole truncate; that is
2429          * not even possible, as a truncate may need to free too many
2430          * blocks to fit in a single transaction. Instead we reserve
2431          * credits to change i_size and put the inode onto the orphan
2432          * list. If needed, the truncate will extend or restart the
2433          * transaction.
2434          */
2435         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
2436                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
2437
2438         inode = osd_dt_obj(dt)->oo_inode;
2439         LASSERT(inode);
2440
2441         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2442                                    i_projid_read(inode), 0, oh, osd_dt_obj(dt),
2443                                    NULL, OSD_QID_BLK);
2444
2445         if (rc == 0)
2446                 rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
2447
2448         RETURN(rc);
2449 }
2450
2451 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
2452                      __u64 start, __u64 end, struct thandle *th)
2453 {
2454         struct osd_object *obj = osd_dt_obj(dt);
2455         struct osd_device *osd = osd_obj2dev(obj);
2456         struct inode *inode = obj->oo_inode;
2457         struct osd_access_lock *al;
2458         struct osd_thandle *oh;
2459         int rc = 0, found = 0;
2460         bool grow = false;
2461         ENTRY;
2462
2463         LASSERT(dt_object_exists(dt));
2464         LASSERT(osd_invariant(obj));
2465         LASSERT(inode != NULL);
2466         dquot_initialize(inode);
2467
2468         LASSERT(th);
2469         oh = container_of(th, struct osd_thandle, ot_super);
2470         LASSERT(oh->ot_handle->h_transaction != NULL);
2471
2472         /* We used to skip truncates to the current size to
2473          * optimize truncates on the OST. With DoM we can
2474          * get an attr_set to set a specific size (MDS_REINT)
2475          * and then a truncate RPC which would essentially
2476          * be skipped. This is bad, so disable this
2477          * optimization on the MDS until the client stops
2478          * sending MDS_REINT (LU-11033) -bzzz
2479          */
2480         if (osd->od_is_ost && i_size_read(inode) == start)
2481                 RETURN(0);
2482
2483         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
2484
2485         spin_lock(&inode->i_lock);
2486         if (i_size_read(inode) < start)
2487                 grow = true;
2488         i_size_write(inode, start);
2489         spin_unlock(&inode->i_lock);
2490         /* if object holds encrypted content, we need to make sure we truncate
2491          * on an encryption unit boundary, or subsequent reads will get
2492          * corrupted content
2493          */
2494         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2495             start & ~LUSTRE_ENCRYPTION_MASK)
2496                 start = (start & LUSTRE_ENCRYPTION_MASK) +
2497                         LUSTRE_ENCRYPTION_UNIT_SIZE;
2498         ll_truncate_pagecache(inode, start);
2499
2500         /* optimize grow case */
2501         if (grow) {
2502                 osd_execute_truncate(obj);
2503                 GOTO(out, rc);
2504         }
2505
2506         inode_lock(inode);
2507         /* add to the orphan list to ensure truncate completion
2508          * if this transaction succeeds. ldiskfs_truncate()
2509          * will take the inode out of the list
2510          */
2511         rc = ldiskfs_orphan_add(oh->ot_handle, inode);
2512         inode_unlock(inode);
2513         if (rc != 0)
2514                 GOTO(out, rc);
2515
2516         list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2517                 if (obj != al->tl_obj)
2518                         continue;
2519                 LASSERT(al->tl_shared == 0);
2520                 found = 1;
2521                 /* do actual truncate in osd_trans_stop() */
2522                 al->tl_truncate = 1;
2523                 break;
2524         }
2525         LASSERT(found);
2526
2527 out:
2528         RETURN(rc);
2529 }
2530
2531 static int fiemap_check_ranges(struct inode *inode,
2532                                u64 start, u64 len, u64 *new_len)
2533 {
2534         loff_t maxbytes;
2535
2536         *new_len = len;
2537
2538         if (len == 0)
2539                 return -EINVAL;
2540
2541         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
2542                 maxbytes = inode->i_sb->s_maxbytes;
2543         else
2544                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
2545
2546         if (start > maxbytes)
2547                 return -EFBIG;
2548
2549         /*
2550          * Shrink request scope to what the fs can actually handle.
2551          */
2552         if (len > maxbytes || (maxbytes - len) < start)
2553                 *new_len = maxbytes - start;
2554
2555         return 0;
2556 }
2557
2558 /* So that the fiemap access checks can't overflow on 32 bit machines. */
2559 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
2560
2561 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
2562                           struct fiemap *fm)
2563 {
2564         struct fiemap_extent_info fieinfo = {0, };
2565         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2566         u64 len;
2567         int rc;
2568
2569         LASSERT(inode);
2570         if (inode->i_op->fiemap == NULL)
2571                 return -EOPNOTSUPP;
2572
2573         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
2574                 return -EINVAL;
2575
2576         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
2577         if (rc)
2578                 return rc;
2579
2580         fieinfo.fi_flags = fm->fm_flags;
2581         fieinfo.fi_extents_max = fm->fm_extent_count;
2582         fieinfo.fi_extents_start = fm->fm_extents;
2583
2584         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
2585                 filemap_write_and_wait(inode->i_mapping);
2586
2587         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
2588         fm->fm_flags = fieinfo.fi_flags;
2589         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
2590
2591         return rc;
2592 }
2593
2594 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
2595                        __u64 start, __u64 end, enum lu_ladvise_type advice)
2596 {
2597         struct osd_object *obj = osd_dt_obj(dt);
2598         int rc = 0;
2599         ENTRY;
2600
2601         switch (advice) {
2602         case LU_LADVISE_DONTNEED:
2603                 if (end)
2604                         invalidate_mapping_pages(obj->oo_inode->i_mapping,
2605                                                  start >> PAGE_SHIFT,
2606                                                  (end - 1) >> PAGE_SHIFT);
2607                 break;
2608         default:
2609                 rc = -ENOTSUPP;
2610                 break;
2611         }
2612
2613         RETURN(rc);
2614 }
2615
2616 static loff_t osd_lseek(const struct lu_env *env, struct dt_object *dt,
2617                         loff_t offset, int whence)
2618 {
2619         struct osd_object *obj = osd_dt_obj(dt);
2620         struct inode *inode = obj->oo_inode;
2621         struct file *file;
2622         loff_t result;
2623
2624         ENTRY;
2625
2626         LASSERT(dt_object_exists(dt));
2627         LASSERT(osd_invariant(obj));
2628         LASSERT(inode);
2629         LASSERT(offset >= 0);
2630
2631         file = osd_quasi_file(env, inode);
2632         result = file->f_op->llseek(file, offset, whence);
2633
2634         /*
2635          * If 'offset' is beyond the end of the object file then treat it
2636          * not as an error but as a valid case for SEEK_HOLE, and return
2637          * 'offset' as the result. LOV will decide whether it is beyond
2638          * the real end of file or not.
2639          */
2639         if (whence == SEEK_HOLE && result == -ENXIO)
2640                 result = offset;
2641
2642         CDEBUG(D_INFO, "seek %s from %lld: %lld\n", whence == SEEK_HOLE ?
2643                        "hole" : "data", offset, result);
2644         RETURN(result);
2645 }
2646
2647 /*
2648  * in some cases we may need the declare methods for objects being created,
2649  * e.g., when we create a symlink
2650  */
2651 const struct dt_body_operations osd_body_ops_new = {
2652         .dbo_declare_write = osd_declare_write,
2653 };
2654
2655 const struct dt_body_operations osd_body_ops = {
2656         .dbo_read                       = osd_read,
2657         .dbo_declare_write              = osd_declare_write,
2658         .dbo_write                      = osd_write,
2659         .dbo_bufs_get                   = osd_bufs_get,
2660         .dbo_bufs_put                   = osd_bufs_put,
2661         .dbo_write_prep                 = osd_write_prep,
2662         .dbo_declare_write_commit       = osd_declare_write_commit,
2663         .dbo_write_commit               = osd_write_commit,
2664         .dbo_read_prep                  = osd_read_prep,
2665         .dbo_declare_punch              = osd_declare_punch,
2666         .dbo_punch                      = osd_punch,
2667         .dbo_fiemap_get                 = osd_fiemap_get,
2668         .dbo_ladvise                    = osd_ladvise,
2669         .dbo_declare_fallocate          = osd_declare_fallocate,
2670         .dbo_fallocate                  = osd_fallocate,
2671         .dbo_lseek                      = osd_lseek,
2672 };
2673
2674 /**
2675  * Get a truncate lock
2676  *
2677  * In order to take a multi-transaction truncate out of the main transaction
2678  * we let the caller grab a lock on the object passed. The lock can be shared
2679  * (for writes) or exclusive (for truncate). It's not allowed to mix truncate
2680  * and write in the same transaction handle (do not confuse with a big ldiskfs
2681  * transaction containing lots of handles).
2682  * The lock must be taken at declaration time.
2683  *
2684  * \param obj           object to lock
2685  * \param oh            transaction
2686  * \param shared        shared or exclusive
2687  *
2688  * \retval 0            lock is granted
2689  * \retval -ENOMEM      no memory to allocate the lock
2690  */
2691 int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
2692 {
2693         struct osd_access_lock *al, *tmp;
2694
2695         LASSERT(obj);
2696         LASSERT(oh);
2697
2698         list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
2699                 if (tmp->tl_obj != obj)
2700                         continue;
2701                 LASSERT(tmp->tl_shared == shared);
2702                 /* found same lock */
2703                 return 0;
2704         }
2705
2706         OBD_ALLOC_PTR(al);
2707         if (unlikely(al == NULL))
2708                 return -ENOMEM;
2709         al->tl_obj = obj;
2710         al->tl_truncate = false;
2711         if (shared)
2712                 down_read(&obj->oo_ext_idx_sem);
2713         else
2714                 down_write(&obj->oo_ext_idx_sem);
2715         al->tl_shared = shared;
2716         lu_object_get(&obj->oo_dt.do_lu);
2717
2718         list_add(&al->tl_list, &oh->ot_trunc_locks);
2719
2720         return 0;
2721 }
2722
2723 void osd_trunc_unlock_all(const struct lu_env *env, struct list_head *list)
2724 {
2725         struct osd_access_lock *al, *tmp;
2726
2727         list_for_each_entry_safe(al, tmp, list, tl_list) {
2728                 if (al->tl_shared)
2729                         up_read(&al->tl_obj->oo_ext_idx_sem);
2730                 else
2731                         up_write(&al->tl_obj->oo_ext_idx_sem);
2732                 osd_object_put(env, al->tl_obj);
2733                 list_del(&al->tl_list);
2734                 OBD_FREE_PTR(al);
2735         }
2736 }
2737
2738 /*
2739  * For a partial-page truncate, flush the page to disk immediately to
2740  * avoid data corruption during direct disk write.  b=17397
2741  */
2742 static void osd_partial_page_flush(struct osd_device *d, struct inode *inode,
2743                                    loff_t offset)
2744 {
2745         if (!(offset & ~PAGE_MASK))
2746                 return;
2747
2748         if (osd_use_page_cache(d)) {
2749                 filemap_fdatawrite_range(inode->i_mapping, offset, offset + 1);
2750         } else {
2751                 /* Notice we use "wait" version to ensure I/O is complete */
2752                 filemap_write_and_wait_range(inode->i_mapping, offset,
2753                                              offset + 1);
2754                 invalidate_mapping_pages(inode->i_mapping, offset >> PAGE_SHIFT,
2755                                          offset >> PAGE_SHIFT);
2756         }
2757 }
2758
2759 void osd_execute_truncate(struct osd_object *obj)
2760 {
2761         struct osd_device *d = osd_obj2dev(obj);
2762         struct inode *inode = obj->oo_inode;
2763         __u64 size;
2764
2765         /* simulate a crash before (or in the middle of) the delayed truncate */
2766         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
2767                 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2768                 struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);
2769
2770                 mutex_lock(&sbi->s_orphan_lock);
2771                 list_del_init(&ei->i_orphan);
2772                 mutex_unlock(&sbi->s_orphan_lock);
2773                 return;
2774         }
2775
2776         size = i_size_read(inode);
2777         inode_lock(inode);
2778         /* if object holds encrypted content, we need to make sure we truncate
2779          * on an encryption unit boundary, or block content will get corrupted
2780          */
2781         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2782             size & ~LUSTRE_ENCRYPTION_MASK)
2783                 inode->i_size = (size & LUSTRE_ENCRYPTION_MASK) +
2784                         LUSTRE_ENCRYPTION_UNIT_SIZE;
2785         ldiskfs_truncate(inode);
2786         inode_unlock(inode);
2787         if (inode->i_size != size) {
2788                 spin_lock(&inode->i_lock);
2789                 i_size_write(inode, size);
2790                 LDISKFS_I(inode)->i_disksize = size;
2791                 spin_unlock(&inode->i_lock);
2792                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
2793         }
2794         osd_partial_page_flush(d, inode, size);
2795 }
2796
2797 void osd_execute_punch(const struct lu_env *env, struct osd_object *obj,
2798                        loff_t start, loff_t end, int mode)
2799 {
2800         struct osd_device *d = osd_obj2dev(obj);
2801         struct inode *inode = obj->oo_inode;
2802         struct file *file = osd_quasi_file(env, inode);
2803
2804         file->f_op->fallocate(file, mode, start, end - start);
2805         osd_partial_page_flush(d, inode, start);
2806         osd_partial_page_flush(d, inode, end - 1);
2807 }
2808
2809 void osd_process_truncates(const struct lu_env *env, struct list_head *list)
2810 {
2811         struct osd_access_lock *al;
2812
2813         LASSERT(journal_current_handle() == NULL);
2814
2815         list_for_each_entry(al, list, tl_list) {
2816                 if (al->tl_shared)
2817                         continue;
2818                 if (al->tl_truncate)
2819                         osd_execute_truncate(al->tl_obj);
2820                 else if (al->tl_punch)
2821                         osd_execute_punch(env, al->tl_obj, al->tl_start,
2822                                           al->tl_end, al->tl_mode);
2823         }
2824 }