LU-16847 ldiskfs: refactor code.
fs/lustre-release.git: lustre/osd-ldiskfs/osd_io.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/osd/osd_io.c
32  *
33  * body operations
34  *
35  * Author: Nikita Danilov <nikita@clusterfs.com>
36  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
37  *
38  */
39
40 #define DEBUG_SUBSYSTEM S_OSD
41
42 /* prerequisite for linux/xattr.h */
43 #include <linux/types.h>
44 /* prerequisite for linux/xattr.h */
45 #include <linux/fs.h>
46 #include <linux/mm.h>
47 #include <linux/swap.h>
48 #include <linux/pagevec.h>
49
50 /*
51  * struct OBD_{ALLOC,FREE}*()
52  */
53 #include <obd_support.h>
54 #include <libcfs/libcfs.h>
55
56 #include "osd_internal.h"
57
58 /* ext_depth() */
59 #include <ldiskfs/ldiskfs_extents.h>
60 #include <ldiskfs/ldiskfs.h>
61
62 #ifndef SECTOR_SHIFT
63 #define SECTOR_SHIFT 9
64 #endif
65
66 struct kmem_cache *biop_cachep;
67
68 #ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
69 static void dio_complete_routine(struct bio *bio);
70 #else
71 static void dio_complete_routine(struct bio *bio, int error);
72 #endif
73
74 static int osd_bio_init(struct bio *bio, struct osd_iobuf *iobuf,
75                         int start_page_idx)
76 {
77         struct osd_bio_private *bio_private = NULL;
78         ENTRY;
79
80         OBD_SLAB_ALLOC_GFP(bio_private, biop_cachep, sizeof(*bio_private),
81                            GFP_NOIO);
82         if (bio_private == NULL)
83                 RETURN(-ENOMEM);
84
85         bio->bi_end_io = dio_complete_routine;
86         bio->bi_private = bio_private;
87         bio_private->obp_start_page_idx = start_page_idx;
88         bio_private->obp_iobuf = iobuf;
89
90         RETURN(0);
91 }
92
93 static void osd_bio_fini(struct bio *bio)
94 {
95         struct osd_bio_private *bio_private;
96
97         if (!bio)
98                 return;
99         bio_private = bio->bi_private;
100         bio_put(bio);
101         OBD_SLAB_FREE(bio_private, biop_cachep, sizeof(*bio_private));
102 }
103
104 static inline bool osd_use_page_cache(struct osd_device *d)
105 {
106         /* do not use pagecache if write and read caching are disabled */
107         if (d->od_writethrough_cache + d->od_read_cache == 0)
108                 return false;
109         /* use pagecache by default */
110         return true;
111 }
112
113 static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
114                             struct inode *inode,
115                             int rw, const short line, int pages)
116 {
117         int blocks, i;
118
119         LASSERTF(iobuf->dr_elapsed_valid == 0,
120                  "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
121                  atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
122                  iobuf->dr_init_at);
123         LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
124
125         init_waitqueue_head(&iobuf->dr_wait);
126         atomic_set(&iobuf->dr_numreqs, 0);
127         iobuf->dr_npages = 0;
128         iobuf->dr_error = 0;
129         iobuf->dr_dev = d;
130         iobuf->dr_frags = 0;
131         iobuf->dr_elapsed = ktime_set(0, 0);
132         /* must be counted before, so assert */
133         iobuf->dr_rw = rw;
134         iobuf->dr_init_at = line;
135         iobuf->dr_inode = inode;
136
137         /* Init dr_start_pg_wblks to 0 for osd_read/write_prep().
138          * osd_write_commit() needs to keep the value assigned in
139          * osd_ldiskfs_map_inode_pages() across retries (example below);
140          * before that, resetting it to 0 in osd_write_prep() is enough.
141          */
142         if (rw == 0)
143                 iobuf->dr_start_pg_wblks = 0;
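        /*
         * Worked example (hypothetical numbers, assuming PAGE_SIZE=64KB and
         * blocksize=4KB, i.e. blocks_per_page=16): if osd_write_commit()
         * runs out of declared credits after 40 blocks have been mapped,
         * osd_ldiskfs_map_inode_pages() records
         * dr_start_pg_wblks = 40 % 16 = 8 and returns -EAGAIN; on the retry
         * this function is entered with rw == 1, the value 8 is preserved,
         * and the mapping resumes at block 8 of the partially-mapped page.
         */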
144
145         blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
146         if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
147                 LASSERT(iobuf->dr_pg_buf.lb_len >=
148                         pages * sizeof(iobuf->dr_pages[0]));
149                 return 0;
150         }
151
152         /* start with 1MB for 4K blocks */
153         i = 256;
154         while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
155                 i <<= 1;
156
157         CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
158                (unsigned int)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
159         pages = i;
160         blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
161         iobuf->dr_max_pages = 0;
162         CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
163                (unsigned int)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
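        /*
         * For example (hypothetical request, assuming 4KB blocks, 4KB pages
         * and PTLRPC_MAX_BRW_PAGES >= 512): pages = 300 grows i from 256 to
         * 512, so room for 512 page pointers and 512 block numbers is
         * allocated below; a later request of up to 512 pages then reuses
         * the buffers thanks to the dr_bl_buf.lb_len check above.
         */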
164
165         lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
166         iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
167         if (unlikely(iobuf->dr_blocks == NULL))
168                 return -ENOMEM;
169
170         lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
171         iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
172         if (unlikely(iobuf->dr_pages == NULL))
173                 return -ENOMEM;
174
175         lu_buf_realloc(&iobuf->dr_lnb_buf,
176                        pages * sizeof(iobuf->dr_lnbs[0]));
177         iobuf->dr_lnbs = iobuf->dr_lnb_buf.lb_buf;
178         if (unlikely(iobuf->dr_lnbs == NULL))
179                 return -ENOMEM;
180
181         iobuf->dr_max_pages = pages;
182
183         return 0;
184 }
185
186 #define osd_init_iobuf(dev, iobuf, inode, rw, pages)                    \
187 ({                                                                      \
188         int __r;                                                        \
189         BUILD_BUG_ON(__LINE__ >= (1 << 16));                            \
190         __r = __osd_init_iobuf(dev, iobuf, inode, rw, __LINE__, pages); \
191         __r;                                                            \
192 })
193
194 static void osd_iobuf_add_page(struct osd_iobuf *iobuf,
195                                struct niobuf_local *lnb)
196 {
197         LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
198         iobuf->dr_pages[iobuf->dr_npages] = lnb->lnb_page;
199         iobuf->dr_lnbs[iobuf->dr_npages] = lnb;
200         iobuf->dr_npages++;
201 }
202
203 void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
204 {
205         int rw = iobuf->dr_rw;
206
207         if (iobuf->dr_elapsed_valid) {
208                 struct brw_stats *h = &d->od_brw_stats;
209
210                 iobuf->dr_elapsed_valid = 0;
211                 LASSERT(iobuf->dr_dev == d);
212                 LASSERT(iobuf->dr_frags > 0);
213                 lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_R_DIO_FRAGS+rw],
214                                       iobuf->dr_frags);
215                 lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_R_IO_TIME+rw],
216                                            ktime_to_ms(iobuf->dr_elapsed));
217         }
218
219         iobuf->dr_error = 0;
220 }
221
222 #ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
223 static void dio_complete_routine(struct bio *bio)
224 {
225         int error = blk_status_to_errno(bio->bi_status);
226 #else
227 static void dio_complete_routine(struct bio *bio, int error)
228 {
229 #endif
230         struct osd_bio_private *bio_private = bio->bi_private;
231         struct osd_iobuf *iobuf = bio_private->obp_iobuf;
232         struct bio_vec *bvl;
233
234
235         /* CAVEAT EMPTOR: possibly in IRQ context
236          * DO NOT record procfs stats here!!!
237          */
238         if (unlikely(iobuf == NULL)) {
239                 CERROR("***** bio->bi_private is NULL! Dumping the bio contents to the console. Please report this to <https://jira.whamcloud.com/>; you will probably have to reboot this node.\n");
240                 CERROR("bi_next: %p, bi_flags: %lx, " __stringify(bi_opf)
241                        ": %x, bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, bi_private: %p\n",
242                        bio->bi_next, (unsigned long)bio->bi_flags,
243                        (unsigned int)bio->bi_opf, bio->bi_vcnt, bio_idx(bio),
244                        bio_sectors(bio) << 9, bio->bi_end_io,
245                        atomic_read(&bio->__bi_cnt),
246                        bio->bi_private);
247                 return;
248         }
249
250         /* the check is outside of the loop for performance reasons -bzzz */
251         if (!bio_data_dir(bio)) {
252                 DECLARE_BVEC_ITER_ALL(iter_all);
253
254                 bio_for_each_segment_all(bvl, bio, iter_all) {
255                         if (likely(error == 0))
256                                 SetPageUptodate(bvl_to_page(bvl));
257                         LASSERT(PageLocked(bvl_to_page(bvl)));
258                 }
259                 atomic_dec(&iobuf->dr_dev->od_r_in_flight);
260         } else {
261                 atomic_dec(&iobuf->dr_dev->od_w_in_flight);
262         }
263
264         /* any real error is good enough -bzzz */
265         if (error != 0 && iobuf->dr_error == 0)
266                 iobuf->dr_error = error;
267
268         /*
269          * Set dr_elapsed before dr_numreqs drops to 0; otherwise the
270          * service thread may observe dr_numreqs == 0 while dr_elapsed
271          * is not yet set, so the timing for this I/O is lost and the
272          * stale dr_elapsed_valid trips an assertion on a subsequent
273          * call into the OSD (see the interleaving sketched below).
274          */
275         if (atomic_read(&iobuf->dr_numreqs) == 1) {
276                 ktime_t now = ktime_get();
277
278                 iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
279                 iobuf->dr_elapsed_valid = 1;
280         }
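        /*
         * Sketch of the ordering hazard avoided here: if dr_numreqs were
         * decremented first, the thread waiting in osd_do_bio() could wake
         * up and run osd_fini_iobuf() while dr_elapsed_valid is still 0
         * (the timing would be lost), and the late dr_elapsed_valid = 1
         * would then trip the LASSERTF in __osd_init_iobuf() on the next
         * use of this iobuf.
         */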
281         if (atomic_dec_and_test(&iobuf->dr_numreqs))
282                 wake_up(&iobuf->dr_wait);
283
284         /* Completed bios used to be chained off iobuf->dr_bios and freed in
285          * filter_clear_dreq().  It was then possible to exhaust the biovec-256
286          * mempool when serious on-disk fragmentation was encountered,
287          * deadlocking the OST.  The bios are now released as soon as complete
288          * so the pool cannot be exhausted while IOs are competing. b=10076
289          */
290         osd_bio_fini(bio);
291 }
292
293 static void record_start_io(struct osd_iobuf *iobuf, int size)
294 {
295         struct osd_device *osd = iobuf->dr_dev;
296         struct brw_stats *h = &osd->od_brw_stats;
297
298         iobuf->dr_frags++;
299         atomic_inc(&iobuf->dr_numreqs);
300
301         if (iobuf->dr_rw == 0) {
302                 atomic_inc(&osd->od_r_in_flight);
303                 lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_R_RPC_HIST],
304                                  atomic_read(&osd->od_r_in_flight));
305                 lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_R_DISK_IOSIZE],
306                                            size);
307         } else if (iobuf->dr_rw == 1) {
308                 atomic_inc(&osd->od_w_in_flight);
309                 lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_W_RPC_HIST],
310                                  atomic_read(&osd->od_w_in_flight));
311                 lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_W_DISK_IOSIZE],
312                                            size);
313         } else {
314                 LBUG();
315         }
316 }
317
318 static int osd_submit_bio(struct osd_device *osd,
319                           struct osd_iobuf *iobuf,
320                           struct bio *bio)
321 {
322         struct request_queue *q;
323         unsigned int bi_size;
324         int rc = 0;
325
326         if (bio == NULL)
327                 return 0;
328
329         q = bio_get_queue(bio);
330         bi_size = bio_sectors(bio) << SECTOR_SHIFT;
331         /* Dang! I have to fragment this I/O */
332         CDEBUG(D_INODE,
333                "bio++ sz %d vcnt %d(%d) sectors %d(%d) psg %d(%d)\n",
334                bi_size, bio->bi_vcnt, bio->bi_max_vecs,
335                bio_sectors(bio),
336                queue_max_sectors(q),
337                osd_bio_nr_segs(bio),
338                queue_max_segments(q));
339
340         rc = osd_bio_integrity_handle(osd, bio, iobuf);
341         if (rc)
342                 goto out;
343
344         record_start_io(iobuf, bi_size);
345
346 #ifdef HAVE_SUBMIT_BIO_2ARGS
347         submit_bio(iobuf->dr_rw ? WRITE : READ, bio);
348 #else
349         bio->bi_opf |= iobuf->dr_rw;
350         submit_bio(bio);
351 #endif
352 out:
353         return rc;
354 }
355
356 static int can_be_merged(struct bio *bio, sector_t sector)
357 {
358
359         return bio_end_sector(bio) == sector ? 1 : 0;
360 }
361
362
363 static void osd_mark_page_io_done(struct osd_iobuf *iobuf,
364                                   struct inode *inode,
365                                   sector_t start_blocks,
366                                   sector_t count)
367 {
368         struct niobuf_local **lnbs = iobuf->dr_lnbs;
369         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
370         int i, end;
371
372         i = start_blocks / blocks_per_page;
373         end = (start_blocks + count) / blocks_per_page;
374         for ( ; i < end; i++)
375                 lnbs[i]->lnb_flags |= OBD_BRW_DONE;
376 }
377
378 /*
379  * Linux v5.12-rc1-20-ga8affc03a9b3
380  *  block: rename BIO_MAX_PAGES to BIO_MAX_VECS
381  */
382 #ifndef BIO_MAX_VECS
383 #define BIO_MAX_VECS    BIO_MAX_PAGES
384 #endif
385
386 static int osd_do_bio(struct osd_device *osd, struct inode *inode,
387                       struct osd_iobuf *iobuf, sector_t start_blocks,
388                       sector_t count)
389 {
390         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
391         struct page **pages = iobuf->dr_pages;
392         int npages = iobuf->dr_npages;
393         sector_t *blocks = iobuf->dr_blocks;
394         struct super_block *sb = inode->i_sb;
395         int sector_bits = sb->s_blocksize_bits - SECTOR_SHIFT;
396         unsigned int blocksize = sb->s_blocksize;
397         struct block_device *bdev = sb->s_bdev;
398         struct bio *bio = NULL;
399         int bio_start_page_idx = 0;
400         struct page *page;
401         unsigned int page_offset;
402         sector_t sector;
403         int nblocks;
404         int block_idx, block_idx_end;
405         int page_idx, page_idx_start;
406         int i;
407         int rc = 0;
408         bool integrity_enabled;
409         struct blk_plug plug;
410         int blocks_left_page;
411
412         ENTRY;
413
414         LASSERT(iobuf->dr_npages == npages);
415         osd_brw_stats_update(osd, iobuf);
416         iobuf->dr_start_time = ktime_get();
417         integrity_enabled = bdev_integrity_enabled(bdev, iobuf->dr_rw);
418
419         if (!count)
420                 count = npages * blocks_per_page;
421         block_idx_end = start_blocks + count;
422
423         blk_start_plug(&plug);
424
425         page_idx_start = start_blocks / blocks_per_page;
426         for (page_idx = page_idx_start, block_idx = start_blocks;
427              block_idx < block_idx_end; page_idx++,
428              block_idx += blocks_left_page) {
429                 /* For cases where the filesystem's blocksize is not the
430                  * same as PAGE_SIZE (e.g. ARM with PAGE_SIZE=64KB and
431                  * blocksize=4KB), there will be multiple blocks to
432                  * read/write per page. Also, the start and end block may
433                  * not be aligned to the start and end of the page, so the
434                  * first page may skip some blocks at the start ("i != 0",
435                  * "blocks_left_page" is reduced), and the last page may
436                  * skip some blocks at the end (limited by "count").
437                  */
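                /*
                 * Worked example (hypothetical values, PAGE_SIZE=64KB and
                 * blocksize=4KB, so blocks_per_page=16): with start_blocks=5
                 * and count=20, block_idx_end=25.  The first page starts at
                 * i = 5 % 16 = 5 and covers blocks_left_page = 11 blocks
                 * (5..15); the next page starts at i = 0 but is limited by
                 * count to blocks_left_page = 25 - 16 = 9 blocks (16..24).
                 */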
438                 page = pages[page_idx];
439                 LASSERT(page_idx < iobuf->dr_npages);
440
441                 i = block_idx % blocks_per_page;
442                 blocks_left_page = blocks_per_page - i;
443                 if (block_idx + blocks_left_page > block_idx_end)
444                         blocks_left_page = block_idx_end - block_idx;
445                 page_offset = i * blocksize;
446                 for (i = 0; i < blocks_left_page;
447                      i += nblocks, page_offset += blocksize * nblocks) {
448                         nblocks = 1;
449
450                         if (blocks[block_idx + i] == 0) {  /* hole */
451                                 LASSERTF(iobuf->dr_rw == 0,
452                                          "page_idx %u, block_idx %u, i %u,"
453                                          "start_blocks: %llu, count: %llu, npages: %d\n",
454                                          page_idx, block_idx, i,
455                                          (unsigned long long)start_blocks,
456                                          (unsigned long long)count, npages);
457                                 memset(kmap(page) + page_offset, 0, blocksize);
458                                 kunmap(page);
459                                 continue;
460                         }
461
462                         sector = (sector_t)blocks[block_idx + i] << sector_bits;
463
464                         /* Additional contiguous file blocks? */
465                         while (i + nblocks < blocks_left_page &&
466                                (sector + (nblocks << sector_bits)) ==
467                                ((sector_t)blocks[block_idx + i + nblocks] <<
468                                  sector_bits))
469                                 nblocks++;
470
471                         if (bio && can_be_merged(bio, sector) &&
472                             bio_add_page(bio, page, blocksize * nblocks,
473                                          page_offset) != 0)
474                                 continue;       /* added this frag OK */
475
476                         rc = osd_submit_bio(osd, iobuf, bio);
477                         if (rc)
478                                 goto out;
479
480                         bio_start_page_idx = page_idx;
481                         /* allocate new bio */
482                         bio = cfs_bio_alloc(bdev,
483                                             min_t(unsigned short, BIO_MAX_VECS,
484                                                   (block_idx_end - block_idx +
485                                                    blocks_left_page - 1)),
486                                             iobuf->dr_rw ? REQ_OP_WRITE
487                                                          : REQ_OP_READ,
488                                             GFP_NOIO);
489                         if (!bio) {
490                                 CERROR("Can't allocate bio %u pages\n",
491                                        block_idx_end - block_idx +
492                                        blocks_left_page - 1);
493                                 rc = -ENOMEM;
494                                 goto out;
495                         }
496                         bio_set_sector(bio, sector);
497                         rc = osd_bio_init(bio, iobuf, bio_start_page_idx);
498                         if (rc)
499                                 goto out;
500
501                         rc = bio_add_page(bio, page,
502                                           blocksize * nblocks, page_offset);
503                         LASSERT(rc != 0);
504                 }
505         }
506         rc = osd_submit_bio(osd, iobuf, bio);
507         if (rc)
508                 goto out;
509 out:
510         blk_finish_plug(&plug);
511
512         /* in order to achieve better IO throughput, we don't wait for writes
513          * completion here. instead we proceed with transaction commit in
514          * parallel and wait for IO completion once transaction is stopped
515          * see osd_trans_stop() for more details -bzzz
516          */
517         if (iobuf->dr_rw == 0 || CFS_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT)) {
518                 wait_event(iobuf->dr_wait,
519                            atomic_read(&iobuf->dr_numreqs) == 0);
520         }
521
522         if (rc == 0)
523                 rc = iobuf->dr_error;
524         else
525                 osd_bio_fini(bio);
526
527         if (iobuf->dr_rw == 0 || CFS_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT))
528                 osd_fini_iobuf(osd, iobuf);
529
530         /* Write only now */
531         if (rc == 0 && iobuf->dr_rw)
532                 osd_mark_page_io_done(iobuf, inode,
533                                       start_blocks, count);
534
535         RETURN(rc);
536 }
537
538 static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
539                                    struct niobuf_local *lnb, int maxlnb)
540 {
541         int rc = 0;
542         ENTRY;
543
544         *nrpages = 0;
545
546         while (len > 0) {
547                 int poff = offset & (PAGE_SIZE - 1);
548                 int plen = PAGE_SIZE - poff;
549
550                 if (*nrpages >= maxlnb) {
551                         rc = -EOVERFLOW;
552                         break;
553                 }
554
555                 if (plen > len)
556                         plen = len;
557                 lnb->lnb_file_offset = offset;
558                 lnb->lnb_page_offset = poff;
559                 lnb->lnb_len = plen;
560                 /* lnb->lnb_flags = rnb->rnb_flags; */
561                 lnb->lnb_flags = 0;
562                 lnb->lnb_page = NULL;
563                 lnb->lnb_rc = 0;
564                 lnb->lnb_guard_rpc = 0;
565                 lnb->lnb_guard_disk = 0;
566                 lnb->lnb_locked = 0;
567
568                 LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
569                          (long long) len);
570                 offset += plen;
571                 len -= plen;
572                 lnb++;
573                 (*nrpages)++;
574         }
575
576         RETURN(rc);
577 }
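/*
 * For example (hypothetical request, 4KB pages): offset=6144, len=10000 is
 * split into three niobufs: lnb[0] with lnb_file_offset=6144,
 * lnb_page_offset=2048, lnb_len=2048; lnb[1] with lnb_file_offset=8192,
 * lnb_len=4096; lnb[2] with lnb_file_offset=12288, lnb_len=3856.  *nrpages
 * ends up as 3, or -EOVERFLOW is returned if maxlnb is smaller than that.
 */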
578
579 static struct page *osd_get_page(const struct lu_env *env, struct dt_object *dt,
580                                  loff_t offset, gfp_t gfp_mask, bool cache)
581 {
582         struct osd_thread_info *oti = osd_oti_get(env);
583         struct inode *inode = osd_dt_obj(dt)->oo_inode;
584         struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
585         struct page *page;
586         int cur;
587
588         LASSERT(inode);
589
590         if (cache) {
591                 page = find_or_create_page(inode->i_mapping,
592                                            offset >> PAGE_SHIFT, gfp_mask);
593
594                 if (likely(page)) {
595                         LASSERT(!PagePrivate2(page));
596                         wait_on_page_writeback(page);
597                 } else {
598                         lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
599                 }
600
601                 return page;
602         }
603
604         if (inode->i_mapping->nrpages) {
605                 /* consult with pagecache, but do not create new pages */
606                 /* this is normally used once */
607                 page = find_lock_page(inode->i_mapping, offset >> PAGE_SHIFT);
608                 if (page) {
609                         wait_on_page_writeback(page);
610                         return page;
611                 }
612         }
613
614         LASSERT(oti->oti_dio_pages);
615         cur = oti->oti_dio_pages_used;
616         page = oti->oti_dio_pages[cur];
617
618         if (unlikely(!page)) {
619                 LASSERT(cur < PTLRPC_MAX_BRW_PAGES);
620                 page = alloc_page(gfp_mask);
621                 if (!page)
622                         return NULL;
623                 oti->oti_dio_pages[cur] = page;
624                 SetPagePrivate2(page);
625                 lock_page(page);
626         }
627
628         ClearPageUptodate(page);
629         page->index = offset >> PAGE_SHIFT;
630         oti->oti_dio_pages_used++;
631
632         return page;
633 }
634
635 /*
636  * the following "locks" are involved:
637  * journal_start
638  * i_mutex
639  * page lock
640  *
641  * osd write path:
642  *  - lock page(s)
643  *  - journal_start
644  *  - truncate_sem
645  *
646  * ext4 vmtruncate:
647  *  - lock pages, unlock
648  *  - journal_start
649  *  - lock partial page
650  *  - i_data_sem
651  *
652  */
653
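/*
 * Typical call sequence for a bulk write through this file (a sketch of the
 * usual flow; the callers live outside this file): osd_bufs_get() pins and
 * locks the pages, osd_write_prep() reads in any partially-overwritten
 * pages, osd_declare_write_commit() reserves journal credits, and
 * osd_write_commit() maps/allocates blocks and submits the bios; finally
 * osd_bufs_put() unlocks and releases the pages.  Reads go through
 * osd_bufs_get(), osd_read_prep() and osd_bufs_put().
 */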
654 /**
655  * Unlock and release pages loaded by osd_bufs_get()
656  *
657  * Unlock \a npages pages from \a lnb and drop the refcount on them.
658  *
659  * \param env           thread execution environment
660  * \param dt            dt object undergoing IO (OSD object + methods)
661  * \param lnb           array of pages undergoing IO
662  * \param npages        number of pages in \a lnb
663  *
664  * \retval 0            always
665  */
666 static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
667                         struct niobuf_local *lnb, int npages)
668 {
669         struct osd_thread_info *oti = osd_oti_get(env);
670         struct pagevec pvec;
671         int i;
672
673         ll_pagevec_init(&pvec, 0);
674
675         for (i = 0; i < npages; i++) {
676                 struct page *page = lnb[i].lnb_page;
677
678                 if (page == NULL)
679                         continue;
680
681                 /* if the page isn't cached, then reset uptodate
682                  * to prevent reuse
683                  */
684                 if (PagePrivate2(page)) {
685                         oti->oti_dio_pages_used--;
686                 } else {
687                         if (lnb[i].lnb_locked)
688                                 unlock_page(page);
689                         if (pagevec_add(&pvec, page) == 0)
690                                 pagevec_release(&pvec);
691                 }
692
693                 lnb[i].lnb_page = NULL;
694         }
695
696         LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);
697
698         /* Release any partial pagevec */
699         pagevec_release(&pvec);
700
701         RETURN(0);
702 }
703
704 /**
705  * Load and lock pages undergoing IO
706  *
707  * Pages as described in the \a lnb array are fetched (from disk or cache)
708  * and locked for IO by the caller.
709  *
710  * DLM locking protects us from write and truncate competing for same region,
711  * but partial-page truncate can leave dirty pages in the cache for ldiskfs.
712  * It's possible the writeout on such a page is in progress when we access
713  * it. It's also possible that during this writeout we put new (partial) data
714  * into the page, but won't be able to proceed in filter_commitrw_write().
715  * Therefore, just wait for writeout completion as it should be rare enough.
716  *
717  * \param env           thread execution environment
718  * \param dt            dt object undergoing IO (OSD object + methods)
719  * \param pos           byte offset of IO start
720  * \param len           number of bytes of IO
721  * \param lnb           array of extents undergoing IO
722  * \param maxlnb        max entries available in \a lnb array
723  * \param rw            read or write operation, and other flags
724  *
725  * \retval pages        (zero or more) loaded successfully
726  * \retval -ENOMEM      on memory/page allocation error
727  */
728 static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
729                         loff_t pos, ssize_t len, struct niobuf_local *lnb,
730                         int maxlnb, enum dt_bufs_type rw)
731 {
732         struct osd_thread_info *oti = osd_oti_get(env);
733         struct osd_object *obj = osd_dt_obj(dt);
734         struct osd_device *osd   = osd_obj2dev(obj);
735         int npages, i, iosize, rc = 0;
736         bool cache, write;
737         loff_t fsize;
738         gfp_t gfp_mask;
739
740         LASSERT(obj->oo_inode);
741
742         if (unlikely(obj->oo_destroyed))
743                 RETURN(-ENOENT);
744
745         rc = osd_map_remote_to_local(pos, len, &npages, lnb, maxlnb);
746         if (rc)
747                 RETURN(rc);
748
749         write = rw & DT_BUFS_TYPE_WRITE;
750
751         fsize = lnb[npages - 1].lnb_file_offset + lnb[npages - 1].lnb_len;
752         iosize = fsize - lnb[0].lnb_file_offset;
753         fsize = max(fsize, i_size_read(obj->oo_inode));
754
755         cache = rw & DT_BUFS_TYPE_READAHEAD;
756         if (cache)
757                 goto bypass_checks;
758
759         cache = osd_use_page_cache(osd);
760         while (cache) {
761                 if (write) {
762                         if (!osd->od_writethrough_cache) {
763                                 cache = false;
764                                 break;
765                         }
766                         if (iosize > osd->od_writethrough_max_iosize) {
767                                 cache = false;
768                                 break;
769                         }
770                 } else {
771                         if (!osd->od_read_cache) {
772                                 cache = false;
773                                 break;
774                         }
775                         if (iosize > osd->od_readcache_max_iosize) {
776                                 cache = false;
777                                 break;
778                         }
779                 }
780                 /* don't use cache on large files */
781                 if (osd->od_readcache_max_filesize &&
782                     fsize > osd->od_readcache_max_filesize)
783                         cache = false;
784                 break;
785         }
786
787 bypass_checks:
788         if (!cache && unlikely(!oti->oti_dio_pages)) {
789                 OBD_ALLOC_PTR_ARRAY_LARGE(oti->oti_dio_pages,
790                                           PTLRPC_MAX_BRW_PAGES);
791                 if (!oti->oti_dio_pages)
792                         return -ENOMEM;
793         }
794
795         /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
796         gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
797                                              GFP_HIGHUSER;
798         for (i = 0; i < npages; i++, lnb++) {
799                 lnb->lnb_page = osd_get_page(env, dt, lnb->lnb_file_offset,
800                                              gfp_mask, cache);
801                 if (lnb->lnb_page == NULL)
802                         GOTO(cleanup, rc = -ENOMEM);
803
804                 lnb->lnb_locked = 1;
805                 if (cache)
806                         mark_page_accessed(lnb->lnb_page);
807         }
808
809 #if 0
810         /* XXX: this version doesn't invalidate cached pages, but use them */
811         if (!cache && write && obj->oo_inode->i_mapping->nrpages) {
812                 /* do not allow data aliasing, invalidate pagecache */
813                 /* XXX: can be quite expensive in mixed case */
814                 invalidate_mapping_pages(obj->oo_inode->i_mapping,
815                                 lnb[0].lnb_file_offset >> PAGE_SHIFT,
816                                 lnb[npages - 1].lnb_file_offset >> PAGE_SHIFT);
817         }
818 #endif
819
820         RETURN(i);
821
822 cleanup:
823         if (i > 0)
824                 osd_bufs_put(env, dt, lnb - i, i);
825         return rc;
826 }
827
828 #ifdef HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS
829 static int osd_extend_restart_trans(handle_t *handle, int needed,
830                                     struct inode *inode)
831 {
832         int rc;
833
834         rc = ldiskfs_journal_ensure_credits(handle, needed,
835                 ldiskfs_trans_default_revoke_credits(inode->i_sb));
836         /* this means journal has been restarted */
837         if (rc > 0)
838                 rc = 0;
839
840         return rc;
841 }
842 #else
843 static int osd_extend_restart_trans(handle_t *handle, int needed,
844                                     struct inode *inode)
845 {
846         int rc;
847
848         if (ldiskfs_handle_has_enough_credits(handle, needed))
849                 return 0;
850         rc = ldiskfs_journal_extend(handle,
851                                 needed - handle->h_buffer_credits);
852         if (rc <= 0)
853                 return rc;
854
855         return ldiskfs_journal_restart(handle, needed);
856 }
857 #endif /* HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS */
858
859 static int osd_ldiskfs_map_write(struct inode *inode, struct osd_iobuf *iobuf,
860                                  struct osd_device *osd, sector_t start_blocks,
861                                  sector_t count, loff_t *disk_size,
862                                  __u64 user_size)
863 {
864         /* if file has grown, take user_size into account */
865         if (user_size && *disk_size > user_size)
866                 *disk_size = user_size;
867
868         spin_lock(&inode->i_lock);
869         if (*disk_size > i_size_read(inode)) {
870                 i_size_write(inode, *disk_size);
871                 LDISKFS_I(inode)->i_disksize = *disk_size;
872                 spin_unlock(&inode->i_lock);
873                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
874         } else {
875                 spin_unlock(&inode->i_lock);
876         }
877
878         /*
879          * We don't do stats here as in read path because
880          * write is async: we'll do this in osd_put_bufs()
881          */
882         return osd_do_bio(osd, inode, iobuf, start_blocks, count);
883 }
884
885 static unsigned int osd_extent_bytes(const struct osd_device *o)
886 {
887         unsigned int *extent_bytes_ptr =
888                         raw_cpu_ptr(o->od_extent_bytes_percpu);
889
890         if (likely(*extent_bytes_ptr))
891                 return *extent_bytes_ptr;
892
893         /* initialize on first access or CPU hotplug */
894         if (!ldiskfs_has_feature_extents(osd_sb(o)))
895                 *extent_bytes_ptr = 1 << osd_sb(o)->s_blocksize_bits;
896         else
897                 *extent_bytes_ptr = OSD_DEFAULT_EXTENT_BYTES;
898
899         return *extent_bytes_ptr;
900 }
901
902 #define EXTENT_BYTES_DECAY 64
903 static void osd_decay_extent_bytes(struct osd_device *osd,
904                                    unsigned int new_bytes)
905 {
906         unsigned int old_bytes;
907
908         if (!ldiskfs_has_feature_extents(osd_sb(osd)))
909                 return;
910
911         old_bytes = osd_extent_bytes(osd);
912         *raw_cpu_ptr(osd->od_extent_bytes_percpu) =
913                 (old_bytes * (EXTENT_BYTES_DECAY - 1) +
914                  min(new_bytes, OSD_DEFAULT_EXTENT_BYTES) +
915                  EXTENT_BYTES_DECAY - 1) / EXTENT_BYTES_DECAY;
916 }
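/*
 * For example (hypothetical values, assuming OSD_DEFAULT_EXTENT_BYTES is
 * 1MB): with a per-CPU average of 1048576 bytes and an allocation that only
 * yielded new_bytes = 65536, the average becomes
 * (1048576 * 63 + 65536 + 63) / 64 = 1033216 bytes, i.e. each short
 * allocation moves the estimate by 1/64th of the difference.
 */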
917
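/*
 * osd_ldiskfs_map_inode_pages() walks iobuf->dr_pages looking for runs of
 * pages that are contiguous in the file, maps (and, for writes, allocates)
 * the corresponding blocks with ldiskfs_map_blocks(), and stores the
 * physical block numbers in iobuf->dr_blocks (0 for holes and for unwritten
 * extents on reads).  For writes it also tracks the resulting on-disk size
 * and, when the declared credits run out, flushes what has been mapped so
 * far and asks the caller to restart the transaction by returning -EAGAIN.
 */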
918 static int osd_ldiskfs_map_inode_pages(struct inode *inode,
919                                        struct osd_iobuf *iobuf,
920                                        struct osd_device *osd,
921                                        int create, __u64 user_size,
922                                        int check_credits,
923                                        struct thandle *thandle)
924 {
925         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
926         int rc = 0, i = 0, mapped_index = 0;
927         struct page *fp = NULL;
928         int clen = 0;
929         pgoff_t max_page_index;
930         handle_t *handle = NULL;
931         sector_t start_blocks = 0, count = 0;
932         loff_t disk_size = 0;
933         struct page **page = iobuf->dr_pages;
934         int pages = iobuf->dr_npages;
935         sector_t *blocks = iobuf->dr_blocks;
936         struct niobuf_local *lnb1, *lnb2;
937         loff_t size1, size2;
938
939         max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
940
941         CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
942                 inode->i_ino, pages, (*page)->index);
943
944         if (create) {
945                 create = LDISKFS_GET_BLOCKS_CREATE;
946                 handle = ldiskfs_journal_current_handle();
947                 LASSERT(handle != NULL);
948                 rc = osd_attach_jinode(inode);
949                 if (rc)
950                         return rc;
951                 disk_size = i_size_read(inode);
952                 /* if disk_size is already bigger than specified user_size,
953                  * ignore user_size
954                  */
955                 if (disk_size > user_size)
956                         user_size = 0;
957         }
958         /* pages are sorted already, so we just have to find
959          * contiguous ranges and process them properly
960          */
961         while (i < pages) {
962                 long blen, total = 0, previous_total = 0;
963                 struct ldiskfs_map_blocks map = { 0 };
964                 ktime_t time;
965
966                 if (fp == NULL) { /* start new extent */
967                         fp = *page++;
968                         clen = 1;
969                         if (++i != pages)
970                                 continue;
971                 } else if (fp->index + clen == (*page)->index) {
972                         /* continue the extent */
973                         page++;
974                         clen++;
975                         if (++i != pages)
976                                 continue;
977                 }
978                 if (fp->index + clen >= max_page_index)
979                         GOTO(cleanup, rc = -EFBIG);
980                 /* process found extent */
981                 map.m_lblk = fp->index * blocks_per_page;
982                 map.m_len = blen = clen * blocks_per_page;
983
984                 /*
985                  * Skip already written blocks of the start page.
986                  * Note that this branch is never taken for 4K PAGE_SIZE,
987                  * because dr_start_pg_wblks is always 0 in that case:
988                  * iobuf->dr_start_pg_wblks = (start_blocks + count) %
989                  * blocks_per_page.
990                  */
991                 if (iobuf->dr_start_pg_wblks > 0) {
992                         total = previous_total = start_blocks =
993                                 iobuf->dr_start_pg_wblks;
994                         map.m_lblk = fp->index * blocks_per_page +
995                                 total;
996                         map.m_len = blen - total;
997                         iobuf->dr_start_pg_wblks = 0;
998                 }
999
1000 cont_map:
1001                 /**
1002                  * We might restart the transaction for block allocations.
1003                  * To preserve data=ordered semantics, the IO submission,
1004                  * disk size update and block allocations must all happen
1005                  * within the same transaction to keep them consistent.
1006                  */
1007                 if (handle && check_credits) {
1008                         struct osd_thandle *oh;
1009
1010                         LASSERT(thandle != NULL);
1011                         oh = container_of(thandle, struct osd_thandle,
1012                                           ot_super);
1013                         /*
1014                          * only issue IO if a transaction restart is needed;
1015                          * updating the disk size needs the inode lock, which
1016                          * we want to avoid taking as much as possible.
1017                          */
1018                         if (oh->oh_declared_ext <= 0) {
1019                                 rc = osd_ldiskfs_map_write(inode,
1020                                         iobuf, osd, start_blocks,
1021                                         count, &disk_size, user_size);
1022                                 if (rc)
1023                                         GOTO(cleanup, rc);
1024                                 thandle->th_restart_tran = 1;
1025                                 iobuf->dr_start_pg_wblks = (start_blocks +
1026                                                 count) % blocks_per_page;
1027                                 GOTO(cleanup, rc = -EAGAIN);
1028                         }
1029
1030                         if (CFS_FAIL_CHECK(OBD_FAIL_OST_RESTART_IO))
1031                                 oh->oh_declared_ext = 0;
1032                         else
1033                                 oh->oh_declared_ext--;
1034                 }
1035
1036                 time = ktime_get();
1037                 rc = ldiskfs_map_blocks(handle, inode, &map, create);
1038                 time = ktime_sub(ktime_get(), time);
1039
1040                 if (rc >= 0) {
1041                         struct brw_stats *h = &osd->od_brw_stats;
1042                         int idx, c = 0;
1043
1044                         idx = map.m_flags & LDISKFS_MAP_NEW ?
1045                                 BRW_ALLOC_TIME : BRW_MAP_TIME;
1046                         lprocfs_oh_tally_log2_pcpu(&h->bs_hist[idx],
1047                                                    ktime_to_ms(time));
1048
1049                         for (; total < blen && c < map.m_len; c++, total++) {
1050                                 if (rc == 0) {
1051                                         *(blocks + total) = 0;
1052                                         total++;
1053                                         break;
1054                                 }
1055                                 if ((map.m_flags & LDISKFS_MAP_UNWRITTEN) &&
1056                                     !create) {
1057                                         /* don't try to read allocated but
1058                                          * unwritten blocks; instead fill those
1059                                          * regions with zeros in osd_do_bio() */
1060                                         *(blocks + total) = 0;
1061                                         continue;
1062                                 }
1063                                 *(blocks + total) = map.m_pblk + c;
1064                                 /* unmap any possible underlying
1065                                  * metadata from the block device
1066                                  * mapping.  b=6998.
1067                                  */
1068                                 if ((map.m_flags & LDISKFS_MAP_NEW) &&
1069                                     create)
1070                                         clean_bdev_aliases(inode->i_sb->s_bdev,
1071                                                            map.m_pblk + c, 1);
1072                         }
1073                         rc = 0;
1074                 }
1075
1076                 if (rc == 0 && create) {
1077                         count += (total - previous_total);
1078                         mapped_index = (start_blocks + count + blocks_per_page -
1079                                         1) / blocks_per_page - 1;
1080                         lnb1 = iobuf->dr_lnbs[i - clen];
1081                         lnb2 = iobuf->dr_lnbs[mapped_index];
1082                         size1 = lnb1->lnb_file_offset -
1083                                 (lnb1->lnb_file_offset % PAGE_SIZE) +
1084                                 (total << inode->i_blkbits);
1085                         size2 = lnb2->lnb_file_offset + lnb2->lnb_len;
1086
1087                         if (size1 > size2)
1088                                 size1 = size2;
1089                         if (size1 > disk_size)
1090                                 disk_size = size1;
1091                 }
1092
1093                 if (rc == 0 && total < blen) {
1094                         /*
1095                          * decay the extent-bytes estimate if we could not
1096                          * allocate the whole extent in one call.
1097                          */
1098                         osd_decay_extent_bytes(osd,
1099                                 (total - previous_total) << inode->i_blkbits);
1100                         map.m_lblk = fp->index * blocks_per_page + total;
1101                         map.m_len = blen - total;
1102                         previous_total = total;
1103                         goto cont_map;
1104                 }
1105                 if (rc != 0)
1106                         GOTO(cleanup, rc);
1107                 /*
1108                  * update the estimate as well when we managed to allocate
1109                  * a good large extent.
1110                  */
1111                 if (total - previous_total >=
1112                     osd_extent_bytes(osd) >> inode->i_blkbits)
1113                         osd_decay_extent_bytes(osd,
1114                                 (total - previous_total) << inode->i_blkbits);
1115                 /* look for next extent */
1116                 fp = NULL;
1117                 blocks += blocks_per_page * clen;
1118         }
1119 cleanup:
1120         if (rc == 0 && create &&
1121             start_blocks < pages * blocks_per_page) {
1122                 rc = osd_ldiskfs_map_write(inode, iobuf, osd, start_blocks,
1123                                            count, &disk_size, user_size);
1124                 LASSERT(start_blocks + count == pages * blocks_per_page);
1125         }
1126         return rc;
1127 }
1128
1129 static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
1130                           struct niobuf_local *lnb, int npages)
1131 {
1132         struct osd_thread_info *oti   = osd_oti_get(env);
1133         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
1134         struct inode           *inode = osd_dt_obj(dt)->oo_inode;
1135         struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
1136         ktime_t start, end;
1137         s64 timediff;
1138         ssize_t isize;
1139         __s64  maxidx;
1140         int i, rc = 0;
1141
1142         LASSERT(inode);
1143
1144         rc = osd_init_iobuf(osd, iobuf, inode, 0, npages);
1145         if (unlikely(rc != 0))
1146                 RETURN(rc);
1147
1148         isize = i_size_read(inode);
1149         maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;
1150
1151         start = ktime_get();
1152         for (i = 0; i < npages; i++) {
1153
1154                 /*
1155                  * Until the bulk commits, the content of the page is
1156                  * undefined; we'll set it uptodate once the bulk is done,
1157                  * otherwise subsequent reads could access unstable data.
1158                  */
1159                 ClearPageUptodate(lnb[i].lnb_page);
1160
1161                 if (lnb[i].lnb_len == PAGE_SIZE)
1162                         continue;
1163
1164                 if (maxidx >= lnb[i].lnb_page->index) {
1165                         osd_iobuf_add_page(iobuf, &lnb[i]);
1166                 } else {
1167                         long off;
1168                         char *p = kmap(lnb[i].lnb_page);
1169
1170                         off = lnb[i].lnb_page_offset;
1171                         if (off)
1172                                 memset(p, 0, off);
1173                         off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
1174                               ~PAGE_MASK;
1175                         if (off)
1176                                 memset(p + off, 0, PAGE_SIZE - off);
1177                         kunmap(lnb[i].lnb_page);
1178                 }
1179         }
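        /*
         * For example (hypothetical values, 4KB pages): with
         * i_size_read(inode) = 10000, maxidx = 2.  A partial niobuf on page
         * index 1 lies within the existing file, so it is queued with
         * osd_iobuf_add_page() and read from disk below; a partial niobuf
         * on page index 5 is beyond EOF, so only the bytes around the
         * incoming data are zeroed and no read is issued.  Full-page
         * niobufs are skipped entirely.
         */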
1180         end = ktime_get();
1181         timediff = ktime_us_delta(end, start);
1182         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1183
1184         if (iobuf->dr_npages) {
1185                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
1186                                                  0, 0, NULL);
1187                 if (likely(rc == 0)) {
1188                         rc = osd_do_bio(osd, inode, iobuf, 0, 0);
1189                         /* do IO stats for preparation reads */
1190                         osd_fini_iobuf(osd, iobuf);
1191                 }
1192         }
1193         RETURN(rc);
1194 }
1195
1196 #ifdef KERNEL_DS
1197 #define DECLARE_MM_SEGMENT_T(name)             mm_segment_t name
1198 #define access_set_kernel(saved_fs, fei)                                \
1199 do {                                                                    \
1200         saved_fs = get_fs();                                            \
1201         set_fs(KERNEL_DS);                                              \
1202 } while (0)
1203 #define access_unset_kernel(saved_fs, fei)             set_fs((saved_fs))
1204 #else
1205 #define DECLARE_MM_SEGMENT_T(name)
1206 #define access_set_kernel(saved_fs, fei)                                \
1207         (fei)->fi_flags |= LDISKFS_FIEMAP_FLAG_MEMCPY
1208 #define access_unset_kernel(saved_fs, fei) \
1209         (fei)->fi_flags &= ~(LDISKFS_FIEMAP_FLAG_MEMCPY)
1210 #endif /* KERNEL_DS */
1211
1212 static int osd_is_mapped(struct dt_object *dt, __u64 offset,
1213                          struct ldiskfs_map_blocks *map)
1214 {
1215         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1216         int mapped;
1217         sector_t block = osd_i_blocks(inode, offset);
1218         sector_t end;
1219
1220         if (i_size_read(inode) == 0)
1221                 return 0;
1222
1223         /* Beyond EOF, must not be mapped */
1224         if ((i_size_read(inode) - 1) < offset)
1225                 return 0;
1226
1227         end = map->m_lblk + map->m_len;
1228         if (block >= map->m_lblk && block < end)
1229                 return map->m_flags & LDISKFS_MAP_MAPPED;
1230
1231         map->m_lblk = block;
1232         map->m_len = INT_MAX;
1233
1234         mapped = ldiskfs_map_blocks(NULL, inode, map, 0);
1235         if (mapped < 0) {
1236                 map->m_len = 0;
1237                 return 0;
1238         }
1239
1240         return map->m_flags & LDISKFS_MAP_MAPPED;
1241 }
1242
1243 #define MAX_EXTENTS_PER_WRITE 100
1244 static int osd_declare_write_commit(const struct lu_env *env,
1245                                     struct dt_object *dt,
1246                                     struct niobuf_local *lnb, int npages,
1247                                     struct thandle *handle)
1248 {
1249         const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1250         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1251         struct osd_thandle      *oh;
1252         int                     extents = 0, new_meta = 0;
1253         int                     depth, new_blocks = 0;
1254         int                     i;
1255         int                     dirty_groups = 0;
1256         int                     rc = 0;
1257         int                     credits = 0;
1258         long long               quota_space = 0;
1259         struct ldiskfs_map_blocks map;
1260         enum osd_quota_local_flags local_flags = 0;
1261         enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
1262         unsigned int            extent_bytes;
1263         loff_t extent_start = 0;
1264         loff_t extent_end = 0;
1265         ENTRY;
1266
1267         LASSERT(handle != NULL);
1268         oh = container_of(handle, struct osd_thandle, ot_super);
1269         LASSERT(oh->ot_handle == NULL);
1270
1271         /*
1272          * We track a decaying average of extent size per filesystem.
1273          * Most of the time it will be 1MB; as the filesystem becomes
1274          * heavily fragmented it can drop as low as 4KB.
1275          */
1276         extent_bytes = osd_extent_bytes(osd);
1277         LASSERT(extent_bytes >= osd_sb(osd)->s_blocksize);
1278
1279         /* calculate number of extents (probably better to pass nb) */
1280         for (i = 0; i < npages; i++) {
1281                 /* ignore quota for the whole request if any page is from
1282                  * client cache or written by root.
1283                  *
1284                  * XXX we could handle this on per-lnb basis as done by
1285                  * grant.
1286                  */
1287                 if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
1288                     (lnb[i].lnb_flags & OBD_BRW_SYS_RESOURCE) ||
1289                     !(lnb[i].lnb_flags & OBD_BRW_SYNC))
1290                         declare_flags |= OSD_QID_FORCE;
1291
1292                 /*
1293                  * Converting an unwritten extent may require splitting
1294                  * extents, so it cannot be skipped.
1295                  */
1296                 if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &map) &&
1297                     !(map.m_flags & LDISKFS_MAP_UNWRITTEN)) {
1298                         lnb[i].lnb_flags |= OBD_BRW_MAPPED;
1299                         continue;
1300                 }
1301
1302                 if (lnb[i].lnb_flags & OBD_BRW_DONE) {
1303                         lnb[i].lnb_flags |= OBD_BRW_MAPPED;
1304                         continue;
1305                 }
1306
1307                 /* count only unmapped changes */
1308                 new_blocks++;
1309                 if (lnb[i].lnb_file_offset != extent_end || extent_end == 0) {
1310                         if (extent_end != 0)
1311                                 extents += (extent_end - extent_start +
1312                                             extent_bytes - 1) / extent_bytes;
1313                         extent_start = lnb[i].lnb_file_offset;
1314                         extent_end = lnb[i].lnb_file_offset + lnb[i].lnb_len;
1315                 } else {
1316                         extent_end += lnb[i].lnb_len;
1317                 }
1318
1319                 quota_space += PAGE_SIZE;
1320         }
1321
1322         credits++; /* inode */
1323         /*
1324          * overwrite case, no need to modify tree and
1325          * allocate blocks.
1326          */
1327         if (!extent_end)
1328                 goto out_declare;
1329
1330         extents += (extent_end - extent_start +
1331                     extent_bytes - 1) / extent_bytes;
1332         /**
1333          * As space usage grows, mballoc no longer tries as hard to scan
1334          * block groups for the best-aligned free extent, so the average
1335          * bytes per extent can decay to a very small value and cause us
1336          * to reserve far too many credits.  We can afford to be more
1337          * optimistic in the credit reservation: even when the filesystem
1338          * is nearly full, it is extremely unlikely that the worst case
1339          * would ever be hit.
1340          */
1341         if (extents > MAX_EXTENTS_PER_WRITE)
1342                 extents = MAX_EXTENTS_PER_WRITE;
1343
1344         /**
1345          * If we add a single extent, then in the worst case each tree
1346          * level index/leaf needs to be changed if the tree splits.
1347          * If more extents are inserted, they could cause the whole tree
1348          * to split more than once, but this is really rare.
1349          */
1350         if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
1351                 /*
1352                  * many concurrent threads may grow the tree by the time
1353                  * our transaction starts, so consider 2 the minimum depth.
1354                  */
1355                 depth = ext_depth(inode);
1356                 depth = min(max(depth, 1) + 1, LDISKFS_MAX_EXTENT_DEPTH);
1357                 if (extents <= 1) {
1358                         credits += depth * 2 * extents;
1359                         new_meta = depth;
1360                 } else {
1361                         credits += depth * 3 * extents;
1362                         new_meta = depth * 2 * extents;
1363                 }
1364         } else {
1365                 /*
1366                  * With N contiguous data blocks, we need at most
1367                  * N/LDISKFS_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
1368                  * 2 dindirect blocks, and 1 tindirect block.
1369                  */
1370                 new_meta = DIV_ROUND_UP(new_blocks,
1371                                 LDISKFS_ADDR_PER_BLOCK(inode->i_sb)) + 4;
1372                 credits += new_meta;
1373         }
1374         dirty_groups += (extents + new_meta);
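        /*
         * Illustrative example (not in the original source): for an
         * extent-based file with tree depth 3 and 4 declared extents the
         * branch above reserves credits += 3 * 3 * 4 = 36 and
         * new_meta = 3 * 2 * 4 = 24, so dirty_groups just grew by
         * 4 + 24 = 28; the bitmap/gd credits added below are then capped
         * by s_groups_count and s_gdb_count.
         */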
1375
1376         oh->oh_declared_ext = extents;
1377
1378         /* quota space for metadata blocks */
1379         quota_space += new_meta * LDISKFS_BLOCK_SIZE(osd_sb(osd));
1380
1381         /* quota space should be reported in 1K blocks */
1382         quota_space = toqb(quota_space);
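        /*
         * Example (illustrative only, assuming 4KB pages and 4KB blocks):
         * 3 unmapped pages plus one new metadata block give
         * quota_space = 3 * 4096 + 4096 = 16384 bytes, which toqb()
         * rounds up to 16 one-kilobyte quota blocks.
         */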
1383
1384         /* each new block can go in a different group (bitmap + gd) */
1385
1386         /* we can't dirty more bitmap blocks than exist */
1387         if (dirty_groups > LDISKFS_SB(osd_sb(osd))->s_groups_count)
1388                 credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
1389         else
1390                 credits += dirty_groups;
1391
1392         /* we can't dirty more gd blocks than exist */
1393         if (dirty_groups > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
1394                 credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
1395         else
1396                 credits += dirty_groups;
1397
1398         CDEBUG(D_INODE,
1399                "%s: inode #%lu extent_bytes %u extents %d credits %d\n",
1400                osd_ino2name(inode), inode->i_ino, extent_bytes, extents,
1401                credits);
1402
1403 out_declare:
1404         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1405
1406         /* make sure the over quota flags were not set */
1407         lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;
1408
1409         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1410                                    i_projid_read(inode), quota_space, oh,
1411                                    osd_dt_obj(dt), &local_flags, declare_flags);
1412
1413         /* for now we only need to store the over-quota flags in the first
1414          * lnb; once we support multi-object BRW, this code needs to be
1415          * revised.
1416          */
1417         if (local_flags & QUOTA_FL_OVER_USRQUOTA)
1418                 lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
1419         if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
1420                 lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
1421         if (local_flags & QUOTA_FL_OVER_PRJQUOTA)
1422                 lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;
1423         if (local_flags & QUOTA_FL_ROOT_PRJQUOTA)
1424                 lnb[0].lnb_flags |= OBD_BRW_ROOT_PRJQUOTA;
1425
1426         if (rc == 0)
1427                 rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);
1428
1429         RETURN(rc);
1430 }
1431
1432 /* Allocate/map disk blocks for the prepared pages within the transaction */
1433 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
1434                             struct niobuf_local *lnb, int npages,
1435                             struct thandle *thandle, __u64 user_size)
1436 {
1437         struct osd_thread_info *oti = osd_oti_get(env);
1438         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1439         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1440         struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
1441         int rc = 0, i, check_credits = 0;
1442
1443         LASSERT(inode);
1444
1445         rc = osd_init_iobuf(osd, iobuf, inode, 1, npages);
1446         if (unlikely(rc != 0))
1447                 RETURN(rc);
1448
1449         dquot_initialize(inode);
1450
1451         for (i = 0; i < npages; i++) {
1452                 if (lnb[i].lnb_rc == -ENOSPC &&
1453                     (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
1454                         /* Allow the write to proceed if overwriting an
1455                          * existing block
1456                          */
1457                         lnb[i].lnb_rc = 0;
1458                 }
1459
1460                 if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
1461                         CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
1462                                lnb[i].lnb_rc);
1463                         LASSERT(lnb[i].lnb_page);
1464                         generic_error_remove_page(inode->i_mapping,
1465                                                   lnb[i].lnb_page);
1466                         continue;
1467                 }
1468
1469                 if (lnb[i].lnb_flags & OBD_BRW_DONE)
1470                         continue;
1471
1472                 if (!(lnb[i].lnb_flags & OBD_BRW_MAPPED))
1473                         check_credits = 1;
1474
1475                 LASSERT(PageLocked(lnb[i].lnb_page));
1476                 LASSERT(!PageWriteback(lnb[i].lnb_page));
1477
1478                 /*
1479                  * Since write and truncate are serialized by oo_sem, even
1480                  * partial-page truncate should not leave dirty pages in the
1481                  * page cache.
1482                  */
1483                 LASSERT(!PageDirty(lnb[i].lnb_page));
1484
1485                 SetPageUptodate(lnb[i].lnb_page);
1486
1487                 osd_iobuf_add_page(iobuf, &lnb[i]);
1488         }
1489
1490         osd_trans_exec_op(env, thandle, OSD_OT_WRITE);
1491
1492         if (CFS_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
1493                 rc = -ENOSPC;
1494         } else if (iobuf->dr_npages > 0) {
1495                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd,
1496                                                  1, user_size,
1497                                                  check_credits,
1498                                                  thandle);
1499         } else {
1500                 /* no pages to write, no transno is needed */
1501                 thandle->th_local = 1;
1502         }
1503
1504         if (rc != 0 && !thandle->th_restart_tran)
1505                 osd_fini_iobuf(osd, iobuf);
1506
1507         osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
1508
1509         if (unlikely(rc != 0 && !thandle->th_restart_tran)) {
1510                 /* if write fails, we should drop pages from the cache */
1511                 for (i = 0; i < npages; i++) {
1512                         if (lnb[i].lnb_page == NULL)
1513                                 continue;
1514                         if (!PagePrivate2(lnb[i].lnb_page)) {
1515                                 LASSERT(PageLocked(lnb[i].lnb_page));
1516                                 generic_error_remove_page(inode->i_mapping,
1517                                                           lnb[i].lnb_page);
1518                         }
1519                 }
1520         }
1521
1522         RETURN(rc);
1523 }
1524
1525 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1526                          struct niobuf_local *lnb, int npages)
1527 {
1528         struct osd_thread_info *oti = osd_oti_get(env);
1529         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1530         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1531         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1532         int rc = 0, i, cache_hits = 0, cache_misses = 0;
1533         ktime_t start, end;
1534         s64 timediff;
1535         loff_t isize;
1536
1537         LASSERT(inode);
1538
1539         rc = osd_init_iobuf(osd, iobuf, inode, 0, npages);
1540         if (unlikely(rc != 0))
1541                 RETURN(rc);
1542
1543         isize = i_size_read(inode);
1544
1545         start = ktime_get();
1546         for (i = 0; i < npages; i++) {
1547
1548                 if (isize <= lnb[i].lnb_file_offset)
1549                         /* If there's no more data, abort early.
1550                          * lnb->lnb_rc == 0, so it's easy to detect later.
1551                          */
1552                         break;
1553
1554                 /* instead of checking whether we go beyond isize, always
1555                  * send complete pages
1556                  */
1557                 lnb[i].lnb_rc = lnb[i].lnb_len;
1558
1559                 /* Bypass disk read if fail_loc is set properly */
1560                 if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_OST_FAKE_RW))
1561                         SetPageUptodate(lnb[i].lnb_page);
1562
1563                 if (PageUptodate(lnb[i].lnb_page)) {
1564                         cache_hits++;
1565                         unlock_page(lnb[i].lnb_page);
1566                 } else {
1567                         cache_misses++;
1568                         osd_iobuf_add_page(iobuf, &lnb[i]);
1569                 }
1570                 /* no need to unlock in osd_bufs_put(): the sooner the page
1571                  * is unlocked, the earlier another client can access it.
1572                  * Note the real unlock_page() may be called a few lines
1573                  * below, after osd_do_bio(). lnb is per-thread, so it's
1574                  * fine to have PG_locked and lnb_locked inconsistent here
1575                  */
1576                 lnb[i].lnb_locked = 0;
1577         }
1578         end = ktime_get();
1579         timediff = ktime_us_delta(end, start);
1580         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1581
1582         if (cache_hits != 0)
1583                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1584                                     cache_hits);
1585         if (cache_misses != 0)
1586                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1587                                     cache_misses);
1588         if (cache_hits + cache_misses != 0)
1589                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1590                                     cache_hits + cache_misses);
1591
1592         if (iobuf->dr_npages) {
1593                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
1594                                                  0, 0, NULL);
1595                 if (!rc)
1596                         rc = osd_do_bio(osd, inode, iobuf, 0, 0);
1597
1598                 /* IO stats will be done in osd_bufs_put() */
1599
1600                 /* early release to let others read data during the bulk */
1601                 for (i = 0; i < iobuf->dr_npages; i++) {
1602                         LASSERT(PageLocked(iobuf->dr_pages[i]));
1603                         if (!PagePrivate2(iobuf->dr_pages[i]))
1604                                 unlock_page(iobuf->dr_pages[i]);
1605                 }
1606         }
1607
1608         RETURN(rc);
1609 }
1610
1611 /*
1612  * XXX: Another layering violation for now.
1613  *
1614  * We don't want to use the generic ->f_op->read/->write methods, because
1615  * the generic file write path
1616  *         - serializes on ->i_sem, and
1617  *
1618  *         - does a lot of extra work like balance_dirty_pages(),
1619  *
1620  * which doesn't work for globally shared files like /last_rcvd.
1621  */
1622 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1623 {
1624         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1625
1626         memcpy(buffer, (char *)ei->i_data, buflen);
1627
1628         return  buflen;
1629 }
1630
1631 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1632 {
1633         struct buffer_head *bh;
1634         unsigned long block;
1635         int osize;
1636         int blocksize;
1637         int csize;
1638         int boffs;
1639
1640         /* prevent reading after eof */
1641         spin_lock(&inode->i_lock);
1642         if (i_size_read(inode) < *offs + size) {
1643                 loff_t diff = i_size_read(inode) - *offs;
1644
1645                 spin_unlock(&inode->i_lock);
1646                 if (diff < 0) {
1647                         CDEBUG(D_OTHER,
1648                                "size %llu is too short to read @%llu\n",
1649                                i_size_read(inode), *offs);
1650                         return -EBADR;
1651                 } else if (diff == 0) {
1652                         return 0;
1653                 } else {
1654                         size = diff;
1655                 }
1656         } else {
1657                 spin_unlock(&inode->i_lock);
1658         }
1659
1660         blocksize = 1 << inode->i_blkbits;
1661         osize = size;
1662         while (size > 0) {
1663                 block = *offs >> inode->i_blkbits;
1664                 boffs = *offs & (blocksize - 1);
1665                 csize = min(blocksize - boffs, size);
1666                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1667                 if (IS_ERR(bh)) {
1668                         CERROR("%s: can't read %u@%llu on ino %lu: rc = %ld\n",
1669                                osd_ino2name(inode), csize, *offs, inode->i_ino,
1670                                PTR_ERR(bh));
1671                         return PTR_ERR(bh);
1672                 }
1673
1674                 if (bh != NULL) {
1675                         memcpy(buf, bh->b_data + boffs, csize);
1676                         brelse(bh);
1677                 } else {
1678                         memset(buf, 0, csize);
1679                 }
1680
1681                 *offs += csize;
1682                 buf += csize;
1683                 size -= csize;
1684         }
1685         return osize;
1686 }
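/*
 * Usage sketch (illustrative, not part of the original file): read a
 * record from the inode body, advancing the offset on success:
 *
 *      loff_t off = 0;
 *      int rc = osd_ldiskfs_read(inode, buf, len, &off);
 *
 * rc is -EBADR if off is past i_size, 0 at EOF, otherwise the requested
 * size clamped to i_size; off is advanced by the bytes read.
 */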
1687
1688 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1689                         struct lu_buf *buf, loff_t *pos)
1690 {
1691         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1692         int rc;
1693
1694         /* Read a small symlink from the inode body, as we need to maintain
1695          * correct on-disk symlinks for ldiskfs.
1696          */
1697         if (S_ISLNK(dt->do_lu.lo_header->loh_attr)) {
1698                 loff_t size = i_size_read(inode);
1699
1700                 if (buf->lb_len < size)
1701                         return -EOVERFLOW;
1702
1703                 if (size < sizeof(LDISKFS_I(inode)->i_data))
1704                         rc = osd_ldiskfs_readlink(inode, buf->lb_buf, size);
1705                 else
1706                         rc = osd_ldiskfs_read(inode, buf->lb_buf, size, pos);
1707         } else {
1708                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1709         }
1710
1711         return rc;
1712 }
1713
1714 static inline int osd_extents_enabled(struct super_block *sb,
1715                                       struct inode *inode)
1716 {
1717         if (inode != NULL) {
1718                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1719                         return 1;
1720         } else if (ldiskfs_has_feature_extents(sb)) {
1721                 return 1;
1722         }
1723         return 0;
1724 }
1725
1726 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1727                            const loff_t size, const loff_t pos,
1728                            const int blocks)
1729 {
1730         int credits, bits, bs, i;
1731
1732         bits = sb->s_blocksize_bits;
1733         bs = 1 << bits;
1734
1735         /* legacy block map: 3 levels * 3 (bitmap, gd, the block itself).
1736          * we do not expect block maps on large files,
1737          * so let's shrink it to 2 levels (4GB files)
1738          */
1739
1740         /* this is default reservation: 2 levels */
1741         credits = (blocks + 2) * 3;
1742
1743         /* actual offset is unknown, hard to optimize */
1744         if (pos == -1)
1745                 return credits;
1746
1747         /* now check a few specific cases to optimize */
1748         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1749                 /* no indirects */
1750                 credits = blocks;
1751                 /* add credits for blocks that still need to be allocated */
1752                 if (inode == NULL) {
1753                         credits += blocks * 2;
1754                         return credits;
1755                 }
1756                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1757                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1758                         if (LDISKFS_I(inode)->i_data[i] == 0)
1759                                 credits += 2;
1760                 }
1761         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1762                 /* single indirect */
1763                 credits = blocks * 3;
1764                 if (inode == NULL ||
1765                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1766                         credits += 3;
1767                 else
1768                         /* The indirect block may be modified. */
1769                         credits += 1;
1770         }
1771
1772         return credits;
1773 }
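/*
 * Worked example (illustrative only): writing 2 blocks at pos = 0 into a
 * block-mapped file that fits in the direct blocks.  With no inode yet
 * (inode == NULL) this returns blocks + blocks * 2 = 6 credits (each block
 * plus its bitmap and group descriptor); with an existing inode whose
 * i_data[] slots are already allocated it returns just blocks = 2.
 */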
1774
1775 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1776                                  const struct lu_buf *buf, loff_t _pos,
1777                                  struct thandle *handle)
1778 {
1779         struct osd_object  *obj  = osd_dt_obj(dt);
1780         struct inode       *inode = obj->oo_inode;
1781         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1782         struct osd_thandle *oh;
1783         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1784         int                 bits, bs;
1785         int                 depth, size;
1786         loff_t              pos;
1787         ENTRY;
1788
1789         LASSERT(buf != NULL);
1790         LASSERT(handle != NULL);
1791
1792         oh = container_of(handle, struct osd_thandle, ot_super);
1793         LASSERT(oh->ot_handle == NULL);
1794
1795         size = buf->lb_len;
1796         bits = sb->s_blocksize_bits;
1797         bs = 1 << bits;
1798
1799         if (osd_tx_was_declared(env, oh, dt, DTO_WRITE_BASE, _pos))
1800                 RETURN(0);
1801
1802         if (_pos == -1) {
1803                 /* if this is an append, then we
1804                  * should expect a cross-block record
1805                  */
1806                 pos = 0;
1807         } else {
1808                 pos = _pos;
1809         }
1810
1811         /* blocks to modify */
1812         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1813         LASSERT(blocks > 0);
1814
1815         if (inode != NULL && _pos != -1) {
1816                 /* object size in blocks */
1817                 est = (i_size_read(inode) + bs - 1) >> bits;
1818                 allocated = inode->i_blocks >> (bits - 9);
1819                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1820                         /* looks like an overwrite, no need to modify tree */
1821                         credits = blocks;
1822                         /* no need to modify i_size */
1823                         goto out;
1824                 }
1825         }
1826
1827         if (osd_extents_enabled(sb, inode)) {
1828                 /*
1829                  * many concurrent threads may grow the tree by the time
1830                  * our transaction starts, so consider 2 the minimum depth.
1831                  * for every level we may need to allocate a new block
1832                  * and take some entries from the old one: 3 blocks
1833                  * to allocate (bitmap, gd, the block itself) + the old
1834                  * block = 4 per level.
1835                  */
1836                 depth = inode != NULL ? ext_depth(inode) : 0;
1837                 depth = min(max(depth, 1) + 3, LDISKFS_MAX_EXTENT_DEPTH);
1838                 credits = depth;
1839                 /* if not appending, then a split may need to modify
1840                  * existing blocks, moving entries into the new ones
1841                  */
1842                 if (_pos != -1)
1843                         credits += depth;
1844                 /* blocks to store data: bitmap,gd,itself */
1845                 credits += blocks * 3;
1846         } else {
1847                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1848         }
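        /*
         * Illustrative example (not in the original source): a 1-block
         * write at a fixed offset into an empty extent-based file has
         * ext_depth() == 0, so depth = min(max(0, 1) + 3, max) = 4 and
         * credits = 4 + 4 (non-append split) + 1 * 3 (data block, bitmap,
         * gd) = 11, plus one more credit for the inode below.
         */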
1849         /* if the inode is created as part of the transaction,
1850          * then it's already counted by the creation method
1851          */
1852         if (inode != NULL)
1853                 credits++;
1854
1855 out:
1856
1857         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1858
1859         /* dt_declare_write() is usually called for system objects, such
1860          * as llog or last_rcvd files. We needn't enforce quota on those
1861          * objects, so always set the lqi_space as 0.
1862          */
1863         if (inode != NULL)
1864                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1865                                            i_gid_read(inode),
1866                                            i_projid_read(inode), 0,
1867                                            oh, obj, NULL, OSD_QID_BLK);
1868
1869         if (rc == 0)
1870                 rc = osd_trunc_lock(obj, oh, true);
1871
1872         RETURN(rc);
1873 }
1874
1875 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1876 {
1877         /* LU-2634: clear the extent format for fast symlink */
1878         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1879
1880         /* Copying the NUL byte terminating the link target as well */
1881         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen + 1);
1882         spin_lock(&inode->i_lock);
1883         LDISKFS_I(inode)->i_disksize = buflen;
1884         i_size_write(inode, buflen);
1885         spin_unlock(&inode->i_lock);
1886         osd_dirty_inode(inode, I_DIRTY_DATASYNC);
1887
1888         return 0;
1889 }
1890
1891 static int osd_ldiskfs_write_record(struct dt_object *dt, void *buf,
1892                                     int bufsize, int write_NUL, loff_t *offs,
1893                                     handle_t *handle)
1894 {
1895         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1896         struct buffer_head *bh        = NULL;
1897         loff_t              offset    = *offs;
1898         loff_t              new_size  = i_size_read(inode);
1899         unsigned long       block;
1900         int                 blocksize = 1 << inode->i_blkbits;
1901         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1902         int                 err = 0;
1903         int                 size;
1904         int                 boffs;
1905         int                 dirty_inode = 0;
1906         bool create, sparse, sync = false;
1907
1908         if (write_NUL) {
1909                 /*
1910                  * a long symlink write does not count the NUL terminator in
1911                  * bufsize; we write it anyway, and the inode's file size does
1912                  * not count the NUL terminator either.
1913                  */
1914                 ((char *)buf)[bufsize] = '\0';
1915                 ++bufsize;
1916         }
1917
1918         /* only the first flag-set matters */
1919         dirty_inode = !test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA,
1920                                        &ei->i_flags);
1921
1922         /* sparse checking is racy, but sparse files are a very rare case, leave as is */
1923         sparse = (new_size > 0 && (inode->i_blocks >> (inode->i_blkbits - 9)) <
1924                   ((new_size - 1) >> inode->i_blkbits) + 1);
1925
1926         while (bufsize > 0) {
1927                 int credits = handle->h_buffer_credits;
1928                 unsigned long last_block = (new_size == 0) ? 0 :
1929                                            (new_size - 1) >> inode->i_blkbits;
1930
1931                 if (bh)
1932                         brelse(bh);
1933
1934                 block = offset >> inode->i_blkbits;
1935                 boffs = offset & (blocksize - 1);
1936                 size = min(blocksize - boffs, bufsize);
1937                 sync = (block > last_block || new_size == 0 || sparse);
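                /*
                 * i_append_sem is taken when the target block may not be
                 * allocated yet (write past the last block, empty file, or
                 * sparse file); it is held across the allocation and the
                 * zero-fill of a newly created buffer below, presumably so
                 * the fresh block is not exposed before it is initialized.
                 */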
1938
1939                 if (sync)
1940                         down(&ei->i_append_sem);
1941
1942                 bh = __ldiskfs_bread(handle, inode, block, 0);
1943
1944                 if (unlikely(IS_ERR_OR_NULL(bh) && !sync))
1945                         CWARN(
1946                               "%s: adding bh without locking off %llu (block %lu, size %d, offs %llu)\n",
1947                               osd_ino2name(inode),
1948                               offset, block, bufsize, *offs);
1949
1950                 if (IS_ERR_OR_NULL(bh)) {
1951                         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1952                         int flags = LDISKFS_GET_BLOCKS_CREATE;
1953
1954                         /* while the file system is being mounted, avoid
1955                          * preallocation, otherwise mount can take a long
1956                          * time as the mballoc cache is cold.
1957                          * XXX: this is a workaround until we have a proper
1958                          *      fix in mballoc
1959                          * XXX: works with extent-based files only */
1960                         if (!osd->od_cl_seq)
1961                                 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
1962                         bh = __ldiskfs_bread(handle, inode, block, flags);
1963                         create = true;
1964                 } else {
1965                         if (sync) {
1966                                 up(&ei->i_append_sem);
1967                                 sync = false;
1968                         }
1969                         create = false;
1970                 }
1971                 if (IS_ERR_OR_NULL(bh)) {
1972                         if (bh == NULL) {
1973                                 err = -EIO;
1974                         } else {
1975                                 err = PTR_ERR(bh);
1976                                 bh = NULL;
1977                         }
1978
1979                         CERROR(
1980                                "%s: error reading offset %llu (block %lu, size %d, offs %llu), credits %d/%d: rc = %d\n",
1981                                osd_ino2name(inode), offset, block, bufsize,
1982                                *offs, credits, handle->h_buffer_credits, err);
1983                         break;
1984                 }
1985
1986                 err = osd_ldiskfs_journal_get_write_access(handle, inode->i_sb,
1987                                                            bh,
1988                                                            LDISKFS_JTR_NONE);
1989                 if (err) {
1990                         CERROR("journal_get_write_access() returned error %d\n",
1991                                err);
1992                         break;
1993                 }
1994                 LASSERTF(boffs + size <= bh->b_size,
1995                          "boffs %d size %d bh->b_size %lu\n",
1996                          boffs, size, (unsigned long)bh->b_size);
1997                 if (create) {
1998                         memset(bh->b_data, 0, bh->b_size);
1999                         if (sync) {
2000                                 up(&ei->i_append_sem);
2001                                 sync = false;
2002                         }
2003                 }
2004                 memcpy(bh->b_data + boffs, buf, size);
2005                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2006                 if (err)
2007                         break;
2008
2009                 if (offset + size > new_size)
2010                         new_size = offset + size;
2011                 offset += size;
2012                 bufsize -= size;
2013                 buf += size;
2014         }
2015         if (sync)
2016                 up(&ei->i_append_sem);
2017
2018         if (bh)
2019                 brelse(bh);
2020
2021         if (write_NUL)
2022                 --new_size;
2023         /* correct in-core and on-disk sizes */
2024         if (new_size > i_size_read(inode)) {
2025                 spin_lock(&inode->i_lock);
2026                 if (new_size > i_size_read(inode))
2027                         i_size_write(inode, new_size);
2028                 if (i_size_read(inode) > ei->i_disksize) {
2029                         ei->i_disksize = i_size_read(inode);
2030                         dirty_inode = 1;
2031                 }
2032                 spin_unlock(&inode->i_lock);
2033         }
2034         if (dirty_inode)
2035                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
2036
2037         if (err == 0)
2038                 *offs = offset;
2039         return err;
2040 }
2041
2042 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
2043                          const struct lu_buf *buf, loff_t *pos,
2044                          struct thandle *handle)
2045 {
2046         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
2047         struct osd_thandle      *oh;
2048         ssize_t                 result;
2049         int                     is_link;
2050
2051         LASSERT(dt_object_exists(dt));
2052
2053         LASSERT(handle != NULL);
2054         LASSERT(inode != NULL);
2055         dquot_initialize(inode);
2056
2057         /* XXX: don't check: one declared chunk can be used many times */
2058         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
2059
2060         oh = container_of(handle, struct osd_thandle, ot_super);
2061         LASSERT(oh->ot_handle->h_transaction != NULL);
2062         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
2063
2064         /* Write a small symlink to the inode body, as we need to maintain
2065          * correct on-disk symlinks for ldiskfs.
2066          * Note: buf->lb_buf contains a NUL terminator while buf->lb_len
2067          * does not count it.
2068          */
2069         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
2070         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
2071                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
2072         else
2073                 result = osd_ldiskfs_write_record(dt, buf->lb_buf, buf->lb_len,
2074                                                   is_link, pos, oh->ot_handle);
2075         if (result == 0)
2076                 result = buf->lb_len;
2077
2078         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
2079
2080         return result;
2081 }
2082
2083 static int osd_declare_fallocate(const struct lu_env *env,
2084                                  struct dt_object *dt, __u64 start, __u64 end,
2085                                  int mode, struct thandle *th)
2086 {
2087         struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
2088         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2089         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2090         long long quota_space = 0;
2091         /* 5 is max tree depth. (inode + 4 index blocks) */
2092         int depth = 5;
2093         int rc;
2094
2095         ENTRY;
2096
2097         /*
2098          * mode == 0 (which is standard prealloc) and PUNCH_HOLE are supported.
2099          * The rest of the mode options are not supported yet.
2100          */
2101         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2102                 RETURN(-EOPNOTSUPP);
2103
2104         /* disable fallocate completely */
2105         if (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks < 0)
2106                 RETURN(-EOPNOTSUPP);
2107
2108         LASSERT(th);
2109         LASSERT(inode);
2110
2111         if (mode & FALLOC_FL_PUNCH_HOLE) {
2112                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
2113                                            i_gid_read(inode),
2114                                            i_projid_read(inode), 0, oh,
2115                                            osd_dt_obj(dt), NULL, OSD_QID_BLK);
2116                 if (rc == 0)
2117                         rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
2118                 RETURN(rc);
2119         }
2120
2121         /* quota space for metadata blocks;
2122          * an approximate metadata estimate should be good enough.
2123          */
2124         quota_space += PAGE_SIZE;
2125         quota_space += depth * LDISKFS_BLOCK_SIZE(osd_sb(osd));
2126
2127         /* quota space should be reported in 1K blocks */
2128         quota_space = toqb(quota_space) + toqb(end - start) +
2129                       LDISKFS_META_TRANS_BLOCKS(inode->i_sb);
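        /*
         * Example (illustrative only, assuming 4KB pages and blocks): the
         * metadata part above is toqb(4096 + 5 * 4096) = 24 one-KB quota
         * blocks, preallocating a 1MB range adds toqb(1048576) = 1024,
         * plus the per-superblock LDISKFS_META_TRANS_BLOCKS() estimate.
         */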
2130
2131         /* We don't need to reserve credits for the whole fallocate here.
2132          * We reserve space only for metadata; fallocate credits are
2133          * extended as required.
2134          */
2135         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2136                                    i_projid_read(inode), quota_space, oh,
2137                                    osd_dt_obj(dt), NULL, OSD_QID_BLK);
2138         RETURN(rc);
2139 }
2140
2141 static int osd_fallocate_preallocate(const struct lu_env *env,
2142                                      struct dt_object *dt,
2143                                      __u64 start, __u64 end, int mode,
2144                                      struct thandle *th)
2145 {
2146         struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
2147         handle_t *handle = ldiskfs_journal_current_handle();
2148         unsigned int save_credits = oh->ot_credits;
2149         struct osd_object *obj = osd_dt_obj(dt);
2150         struct inode *inode = obj->oo_inode;
2151         struct ldiskfs_map_blocks map;
2152         unsigned int credits;
2153         ldiskfs_lblk_t blen;
2154         ldiskfs_lblk_t boff;
2155         loff_t new_size = 0;
2156         int depth = 0;
2157         int flags;
2158         int rc = 0;
2159
2160         ENTRY;
2161
2162         LASSERT(dt_object_exists(dt));
2163         LASSERT(osd_invariant(obj));
2164         LASSERT(inode != NULL);
2165
2166         CDEBUG(D_INODE, "fallocate: inode #%lu: start %llu end %llu mode %d\n",
2167                inode->i_ino, start, end, mode);
2168
2169         dquot_initialize(inode);
2170
2171         LASSERT(th);
2172
2173         boff = osd_i_blocks(inode, start);
2174         blen = osd_i_blocks(inode, ALIGN(end, 1 << inode->i_blkbits)) - boff;
2175
2176         /* Create and mark new extents as either zero or unwritten */
2177         flags = (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks ||
2178                  !ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)) ?
2179                 LDISKFS_GET_BLOCKS_CREATE_ZERO :
2180                 LDISKFS_GET_BLOCKS_CREATE_UNWRIT_EXT;
2181 #ifdef LDISKFS_GET_BLOCKS_KEEP_SIZE
2182         if (mode & FALLOC_FL_KEEP_SIZE)
2183                 flags |= LDISKFS_GET_BLOCKS_KEEP_SIZE;
2184 #endif
2185         inode_lock(inode);
2186
2187         if (!(mode & FALLOC_FL_KEEP_SIZE) && (end > i_size_read(inode) ||
2188             end > LDISKFS_I(inode)->i_disksize)) {
2189                 new_size = end;
2190                 rc = inode_newsize_ok(inode, new_size);
2191                 if (rc)
2192                         GOTO(out, rc);
2193         }
2194
2195         inode_dio_wait(inode);
2196
2197         map.m_lblk = boff;
2198         map.m_len = blen;
2199
2200         /* Don't normalize the request if it can fit in one extent so
2201          * that it doesn't get unnecessarily split into multiple extents.
2202          */
2203         if (blen <= EXT_UNWRITTEN_MAX_LEN)
2204                 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
2205
2206         /*
2207          * credits to insert 1 extent into extent tree.
2208          */
2209         credits = ldiskfs_chunk_trans_blocks(inode, blen);
2210         depth = ext_depth(inode);
2211
2212         while (rc >= 0 && blen) {
2213                 loff_t epos;
2214
2215                 /*
2216                  * Recalculate credits when extent tree depth changes.
2217                  */
2218                 if (depth != ext_depth(inode)) {
2219                         credits = ldiskfs_chunk_trans_blocks(inode, blen);
2220                         depth = ext_depth(inode);
2221                 }
2222
2223                 /* TODO: quota check */
2224                 rc = osd_extend_restart_trans(handle, credits, inode);
2225                 if (rc)
2226                         break;
2227
2228                 rc = ldiskfs_map_blocks(handle, inode, &map, flags);
2229                 if (rc <= 0) {
2230                         CDEBUG(D_INODE,
2231                                "inode #%lu: block %u: len %u: ldiskfs_map_blocks returned %d\n",
2232                                inode->i_ino, map.m_lblk, map.m_len, rc);
2233                         ldiskfs_mark_inode_dirty(handle, inode);
2234                         break;
2235                 }
2236
2237                 map.m_lblk += rc;
2238                 map.m_len = blen = blen - rc;
2239                 epos = (loff_t)map.m_lblk << inode->i_blkbits;
2240                 inode->i_ctime = current_time(inode);
2241                 if (new_size) {
2242                         if (epos > end)
2243                                 epos = end;
2244                         if (ldiskfs_update_inode_size(inode, epos) & 0x1)
2245                                 inode->i_mtime = inode->i_ctime;
2246 #ifdef LDISKFS_EOFBLOCKS_FL
2247                 } else {
2248                         if (epos > inode->i_size)
2249                                 ldiskfs_set_inode_flag(inode,
2250                                                        LDISKFS_INODE_EOFBLOCKS);
2251 #endif
2252                 }
2253
2254                 ldiskfs_mark_inode_dirty(handle, inode);
2255         }
2256
2257 out:
2258         /* extend credits if needed for operations such as attribute set */
2259         if (rc >= 0)
2260                 rc = osd_extend_restart_trans(handle, save_credits, inode);
2261
2262         inode_unlock(inode);
2263
2264         RETURN(rc);
2265 }
2266
2267 static int osd_fallocate_punch(const struct lu_env *env, struct dt_object *dt,
2268                                __u64 start, __u64 end, int mode,
2269                                struct thandle *th)
2270 {
2271         struct osd_object *obj = osd_dt_obj(dt);
2272         struct inode *inode = obj->oo_inode;
2273         struct osd_access_lock *al;
2274         struct osd_thandle *oh;
2275         int rc = 0, found = 0;
2276
2277         ENTRY;
2278
2279         LASSERT(dt_object_exists(dt));
2280         LASSERT(osd_invariant(obj));
2281         LASSERT(inode != NULL);
2282
2283         dquot_initialize(inode);
2284
2285         LASSERT(th);
2286         oh = container_of(th, struct osd_thandle, ot_super);
2287         LASSERT(oh->ot_handle->h_transaction != NULL);
2288
2289         list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2290                 if (obj != al->tl_obj)
2291                         continue;
2292                 LASSERT(al->tl_shared == 0);
2293                 found = 1;
2294                 /* do actual punch in osd_trans_stop() */
2295                 al->tl_start = start;
2296                 al->tl_end = end;
2297                 al->tl_mode = mode;
2298                 al->tl_punch = true;
2299                 break;
2300         }
2301
2302         RETURN(rc);
2303 }
2304
2305 static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
2306                          __u64 start, __u64 end, int mode, struct thandle *th)
2307 {
2308         int rc;
2309
2310         ENTRY;
2311
2312         if (mode & FALLOC_FL_PUNCH_HOLE) {
2313                 /* punch */
2314                 rc = osd_fallocate_punch(env, dt, start, end, mode, th);
2315         } else {
2316                 /* standard preallocate */
2317                 rc = osd_fallocate_preallocate(env, dt, start, end, mode, th);
2318         }
2319         RETURN(rc);
2320 }
2321
2322 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
2323                              __u64 start, __u64 end, struct thandle *th)
2324 {
2325         struct osd_thandle *oh;
2326         struct osd_object  *obj = osd_dt_obj(dt);
2327         struct inode       *inode;
2328         int                 rc;
2329         ENTRY;
2330
2331         LASSERT(th);
2332         oh = container_of(th, struct osd_thandle, ot_super);
2333
2334         /*
2335          * we don't need to reserve credits for the whole truncate:
2336          * it's not possible anyway, as truncate may need to free too
2337          * many blocks to fit in a single transaction. instead we reserve
2338          * credits to change i_size and put the inode onto the orphan
2339          * list. if needed, truncate will extend or restart the
2340          * transaction
2341          */
2342         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
2343                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
2344
2345         inode = obj->oo_inode;
2346         LASSERT(inode);
2347
2348         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2349                                    i_projid_read(inode), 0, oh, obj,
2350                                    NULL, OSD_QID_BLK);
2351
2352         /* if object holds encrypted content, we need to make sure we truncate
2353          * on an encryption unit boundary, or subsequent reads will get
2354          * corrupted content
2355          */
2356         if (rc == 0) {
2357                 if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2358                     start & ~LUSTRE_ENCRYPTION_MASK)
2359                         start = (start & LUSTRE_ENCRYPTION_MASK) +
2360                                 LUSTRE_ENCRYPTION_UNIT_SIZE;
2361                 ll_truncate_pagecache(inode, start);
2362                 rc = osd_trunc_lock(obj, oh, false);
2363         }
2364
2365         RETURN(rc);
2366 }
2367
2368 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
2369                      __u64 start, __u64 end, struct thandle *th)
2370 {
2371         struct osd_object *obj = osd_dt_obj(dt);
2372         struct osd_device *osd = osd_obj2dev(obj);
2373         struct inode *inode = obj->oo_inode;
2374         struct osd_access_lock *al;
2375         struct osd_thandle *oh;
2376         int rc = 0, found = 0;
2377         bool grow = false;
2378         ENTRY;
2379
2380         LASSERT(dt_object_exists(dt));
2381         LASSERT(osd_invariant(obj));
2382         LASSERT(inode != NULL);
2383         dquot_initialize(inode);
2384
2385         LASSERT(th);
2386         oh = container_of(th, struct osd_thandle, ot_super);
2387         LASSERT(oh->ot_handle->h_transaction != NULL);
2388
2389         /* we used to skip truncate-to-current-size to
2390          * optimize truncates on the OST. with DoM we can
2391          * get an attr_set to set a specific size (MDS_REINT)
2392          * and then a truncate RPC which would essentially
2393          * be skipped. this is bad, so disable this
2394          * optimization on the MDS until the client stops
2395          * sending MDS_REINT (LU-11033) -bzzz
2396          */
2397         if (osd->od_is_ost && i_size_read(inode) == start)
2398                 RETURN(0);
2399
2400         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
2401
2402         spin_lock(&inode->i_lock);
2403         if (i_size_read(inode) < start)
2404                 grow = true;
2405         i_size_write(inode, start);
2406         spin_unlock(&inode->i_lock);
2407
2408         /* optimize grow case */
2409         if (grow) {
2410                 osd_execute_truncate(obj);
2411                 GOTO(out, rc);
2412         }
2413
2414         inode_lock(inode);
2415         /* add to the orphan list to ensure truncate completion
2416          * if this transaction succeeds. ldiskfs_truncate()
2417          * will take the inode off the list
2418          */
2419         rc = ldiskfs_orphan_add(oh->ot_handle, inode);
2420         inode_unlock(inode);
2421         if (rc != 0)
2422                 GOTO(out, rc);
2423
2424         list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2425                 if (obj != al->tl_obj)
2426                         continue;
2427                 LASSERT(al->tl_shared == 0);
2428                 found = 1;
2429                 /* do actual truncate in osd_trans_stop() */
2430                 al->tl_truncate = 1;
2431                 break;
2432         }
2433         LASSERT(found);
2434
2435 out:
2436         RETURN(rc);
2437 }
2438
2439 static int fiemap_check_ranges(struct inode *inode,
2440                                u64 start, u64 len, u64 *new_len)
2441 {
2442         loff_t maxbytes;
2443
2444         *new_len = len;
2445
2446         if (len == 0)
2447                 return -EINVAL;
2448
2449         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
2450                 maxbytes = inode->i_sb->s_maxbytes;
2451         else
2452                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
2453
2454         if (start > maxbytes)
2455                 return -EFBIG;
2456
2457         /*
2458          * Shrink request scope to what the fs can actually handle.
2459          */
2460         if (len > maxbytes || (maxbytes - len) < start)
2461                 *new_len = maxbytes - start;
2462
2463         return 0;
2464 }
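/*
 * Illustrative example (not in the original source): a request with
 * len == maxbytes starting at offset 4096 passes the checks but is
 * shrunk to *new_len = maxbytes - 4096.
 */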
2465
2466 /* So that the fiemap access checks can't overflow on 32 bit machines. */
2467 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
2468
2469 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
2470                           struct fiemap *fm)
2471 {
2472         struct fiemap_extent_info fieinfo = {0, };
2473         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2474         u64 len;
2475         int rc;
2476         DECLARE_MM_SEGMENT_T(saved_fs);
2477
2478         LASSERT(inode);
2479         if (inode->i_op->fiemap == NULL)
2480                 return -EOPNOTSUPP;
2481
2482         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
2483                 return -EINVAL;
2484
2485         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
2486         if (rc)
2487                 return rc;
2488
2489         fieinfo.fi_flags = fm->fm_flags;
2490         fieinfo.fi_extents_max = fm->fm_extent_count;
2491         fieinfo.fi_extents_start = fm->fm_extents;
2492
2493         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
2494                 filemap_write_and_wait(inode->i_mapping);
2495
2496         access_set_kernel(saved_fs, &fieinfo);
2497         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
2498         access_unset_kernel(saved_fs, &fieinfo);
2499         fm->fm_flags = fieinfo.fi_flags;
2500         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
2501
2502         return rc;
2503 }
2504
2505 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
2506                        __u64 start, __u64 end, enum lu_ladvise_type advice)
2507 {
2508         struct osd_object *obj = osd_dt_obj(dt);
2509         int rc = 0;
2510         ENTRY;
2511
2512         switch (advice) {
2513         case LU_LADVISE_DONTNEED:
2514                 if (end)
2515                         invalidate_mapping_pages(obj->oo_inode->i_mapping,
2516                                                  start >> PAGE_SHIFT,
2517                                                  (end - 1) >> PAGE_SHIFT);
2518                 break;
2519         default:
2520                 rc = -ENOTSUPP;
2521                 break;
2522         }
2523
2524         RETURN(rc);
2525 }
2526
2527 static loff_t osd_lseek(const struct lu_env *env, struct dt_object *dt,
2528                         loff_t offset, int whence)
2529 {
2530         struct osd_object *obj = osd_dt_obj(dt);
2531         struct osd_device *dev = osd_obj2dev(obj);
2532         struct inode *inode = obj->oo_inode;
2533         struct file *file;
2534         loff_t result;
2535
2536         ENTRY;
2537         LASSERT(dt_object_exists(dt));
2538         LASSERT(osd_invariant(obj));
2539         LASSERT(inode);
2540         LASSERT(offset >= 0);
2541
2542         file = alloc_file_pseudo(inode, dev->od_mnt, "/", O_NOATIME,
2543                                  inode->i_fop);
2544         if (IS_ERR(file))
2545                 RETURN(PTR_ERR(file));
2546
2547         file->f_mode |= FMODE_64BITHASH;
2548         result = file->f_op->llseek(file, offset, whence);
2549         ihold(inode);
2550         fput(file);
2551         /*
2552          * If 'offset' is beyond the end of the object file, treat it not as
2553          * an error but as a valid case for SEEK_HOLE, and return 'offset'
2554          * as the result. LOV will decide whether it is beyond the real EOF.
2555          */
2556         if (whence == SEEK_HOLE && result == -ENXIO)
2557                 result = offset;
2558
2559         CDEBUG(D_INFO, "seek %s from %lld: %lld\n", whence == SEEK_HOLE ?
2560                        "hole" : "data", offset, result);
2561         RETURN(result);
2562 }
2563
2564 /*
2565  * in some cases we may need declare methods for objects being created,
2566  * e.g. when we create a symlink
2567  */
2568 const struct dt_body_operations osd_body_ops_new = {
2569         .dbo_declare_write = osd_declare_write,
2570 };
2571
2572 const struct dt_body_operations osd_body_ops = {
2573         .dbo_read                       = osd_read,
2574         .dbo_declare_write              = osd_declare_write,
2575         .dbo_write                      = osd_write,
2576         .dbo_bufs_get                   = osd_bufs_get,
2577         .dbo_bufs_put                   = osd_bufs_put,
2578         .dbo_write_prep                 = osd_write_prep,
2579         .dbo_declare_write_commit       = osd_declare_write_commit,
2580         .dbo_write_commit               = osd_write_commit,
2581         .dbo_read_prep                  = osd_read_prep,
2582         .dbo_declare_punch              = osd_declare_punch,
2583         .dbo_punch                      = osd_punch,
2584         .dbo_fiemap_get                 = osd_fiemap_get,
2585         .dbo_ladvise                    = osd_ladvise,
2586         .dbo_declare_fallocate          = osd_declare_fallocate,
2587         .dbo_fallocate                  = osd_fallocate,
2588         .dbo_lseek                      = osd_lseek,
2589 };
2590
2591 /**
2592  * Get a truncate lock
2593  *
2594  * In order to take a multi-transaction truncate out of the main transaction
2595  * we let the caller grab a lock on the object passed. The lock can be shared
2596  * (for writes) or exclusive (for truncate). It is not allowed to mix truncate
2597  * and write in the same transaction handle (do not confuse this with a big
2598  * ldiskfs transaction containing lots of handles).
2599  * The lock must be taken at declaration time.
2600  *
2601  * \param obj           object to lock
2602  * \param oh            transaction
2603  * \param shared        shared or exclusive
2604  *
2605  * \retval 0            lock is granted
2606  * \retval -ENOMEM      no memory to allocate the lock
2607  */
2608 int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
2609 {
2610         struct osd_access_lock *al, *tmp;
2611
2612         LASSERT(obj);
2613         LASSERT(oh);
2614
2615         list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
2616                 if (tmp->tl_obj != obj)
2617                         continue;
2618                 LASSERT(tmp->tl_shared == shared);
2619                 /* found same lock */
2620                 return 0;
2621         }
2622
2623         OBD_ALLOC_PTR(al);
2624         if (unlikely(al == NULL))
2625                 return -ENOMEM;
2626         al->tl_obj = obj;
2627         al->tl_truncate = false;
2628         if (shared)
2629                 down_read(&obj->oo_ext_idx_sem);
2630         else
2631                 down_write(&obj->oo_ext_idx_sem);
2632         al->tl_shared = shared;
2633         lu_object_get(&obj->oo_dt.do_lu);
2634
2635         list_add(&al->tl_list, &oh->ot_trunc_locks);
2636
2637         return 0;
2638 }
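/*
 * Usage sketch (illustrative, not part of the original file): the lock is
 * taken from the dt_declare_* methods and released when the handle's
 * ot_trunc_locks list is torn down via osd_trunc_unlock_all():
 *
 *      rc = osd_trunc_lock(obj, oh, true);     shared, for a declared write
 *      rc = osd_trunc_lock(obj, oh, false);    exclusive, for punch/truncate
 */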
2639
2640 void osd_trunc_unlock_all(const struct lu_env *env, struct list_head *list)
2641 {
2642         struct osd_access_lock *al, *tmp;
2643
2644         list_for_each_entry_safe(al, tmp, list, tl_list) {
2645                 if (al->tl_shared)
2646                         up_read(&al->tl_obj->oo_ext_idx_sem);
2647                 else
2648                         up_write(&al->tl_obj->oo_ext_idx_sem);
2649                 osd_object_put(env, al->tl_obj);
2650                 list_del(&al->tl_list);
2651                 OBD_FREE_PTR(al);
2652         }
2653 }
2654
2655 /* For a partial-page punch, flush the punch range to disk immediately */
2656 static void osd_partial_page_flush_punch(struct osd_device *d,
2657                                          struct inode *inode, loff_t start,
2658                                          loff_t end)
2659 {
2660         if (osd_use_page_cache(d)) {
2661                 filemap_fdatawrite_range(inode->i_mapping, start, end);
2662         } else {
2663                 /* Notice we use "wait" version to ensure I/O is complete */
2664                 filemap_write_and_wait_range(inode->i_mapping, start,
2665                                              end);
2666                 invalidate_mapping_pages(inode->i_mapping, start >> PAGE_SHIFT,
2667                                          end >> PAGE_SHIFT);
2668         }
2669 }
2670
2671 /*
2672  * For a partial-page truncate, flush the page to disk immediately to
2673  * avoid data corruption during direct disk write.  b=17397
2674  */
2675 static void osd_partial_page_flush(struct osd_device *d, struct inode *inode,
2676                                    loff_t offset)
2677 {
2678         if (!(offset & ~PAGE_MASK))
2679                 return;
2680
2681         if (osd_use_page_cache(d)) {
2682                 filemap_fdatawrite_range(inode->i_mapping, offset, offset + 1);
2683         } else {
2684                 /* Notice we use "wait" version to ensure I/O is complete */
2685                 filemap_write_and_wait_range(inode->i_mapping, offset,
2686                                              offset + 1);
2687                 invalidate_mapping_pages(inode->i_mapping, offset >> PAGE_SHIFT,
2688                                          offset >> PAGE_SHIFT);
2689         }
2690 }
2691
2692 void osd_execute_truncate(struct osd_object *obj)
2693 {
2694         struct osd_device *d = osd_obj2dev(obj);
2695         struct inode *inode = obj->oo_inode;
2696         __u64 size;
2697
2698         /* simulate a crash before (or in the middle of) the delayed truncate */
2699         if (CFS_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
2700                 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2701                 struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);
2702
2703                 mutex_lock(&sbi->s_orphan_lock);
2704                 list_del_init(&ei->i_orphan);
2705                 mutex_unlock(&sbi->s_orphan_lock);
2706                 return;
2707         }
2708
2709         size = i_size_read(inode);
2710         inode_lock(inode);
2711         /* if object holds encrypted content, we need to make sure we truncate
2712          * on an encryption unit boundary, or block content will get corrupted
2713          */
2714         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2715             size & ~LUSTRE_ENCRYPTION_MASK)
2716                 inode->i_size = (size & LUSTRE_ENCRYPTION_MASK) +
2717                         LUSTRE_ENCRYPTION_UNIT_SIZE;
2718         ldiskfs_truncate(inode);
2719         inode_unlock(inode);
2720         if (inode->i_size != size) {
2721                 spin_lock(&inode->i_lock);
2722                 i_size_write(inode, size);
2723                 LDISKFS_I(inode)->i_disksize = size;
2724                 spin_unlock(&inode->i_lock);
2725                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
2726         }
2727         osd_partial_page_flush(d, inode, size);
2728 }
2729
2730 static int osd_execute_punch(const struct lu_env *env, struct osd_object *obj,
2731                              loff_t start, loff_t end, int mode)
2732 {
2733         struct osd_device *d = osd_obj2dev(obj);
2734         struct inode *inode = obj->oo_inode;
2735         struct file *file;
2736         int rc;
2737
2738         file = alloc_file_pseudo(inode, d->od_mnt, "/", O_NOATIME,
2739                                  inode->i_fop);
2740         if (IS_ERR(file))
2741                 RETURN(PTR_ERR(file));
2742
2743         file->f_mode |= FMODE_64BITHASH;
2744         rc = file->f_op->fallocate(file, mode, start, end - start);
2745         ihold(inode);
2746         fput(file);
2747         if (rc == 0)
2748                 osd_partial_page_flush_punch(d, inode, start, end - 1);
2749         return rc;
2750 }
2751
2752 int osd_process_truncates(const struct lu_env *env, struct list_head *list)
2753 {
2754         struct osd_access_lock *al;
2755         int rc = 0;
2756
2757         LASSERT(!journal_current_handle());
2758
2759         list_for_each_entry(al, list, tl_list) {
2760                 if (al->tl_shared)
2761                         continue;
2762                 if (al->tl_truncate)
2763                         osd_execute_truncate(al->tl_obj);
2764                 else if (al->tl_punch)
2765                         rc = osd_execute_punch(env, al->tl_obj, al->tl_start,
2766                                                al->tl_end, al->tl_mode);
2767         }
2768
2769         return rc;
2770 }