LU-16847 ldiskfs: ->fiemap replaced with ldiskfs_map_blocks.
lustre/osd-ldiskfs/osd_io.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/osd/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

#define DEBUG_SUBSYSTEM S_OSD

/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagevec.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 */
#include <obd_support.h>
#include <libcfs/libcfs.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>
#include <ldiskfs/ldiskfs.h>

struct kmem_cache *biop_cachep;

#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio);
#else
static void dio_complete_routine(struct bio *bio, int error);
#endif

static int osd_bio_init(struct bio *bio, struct osd_iobuf *iobuf,
                        bool integrity_enabled, int start_page_idx)
{
        struct osd_bio_private *bio_private = NULL;
        ENTRY;

        OBD_SLAB_ALLOC_GFP(bio_private, biop_cachep, sizeof(*bio_private),
                           GFP_NOIO);
        if (bio_private == NULL)
                RETURN(-ENOMEM);

        bio->bi_end_io = dio_complete_routine;
        bio->bi_private = bio_private;
        bio_private->obp_start_page_idx = start_page_idx;
        bio_private->obp_iobuf = iobuf;

        RETURN(0);
}

static void osd_bio_fini(struct bio *bio)
{
        struct osd_bio_private *bio_private;

        if (!bio)
                return;
        bio_private = bio->bi_private;
        bio_put(bio);
        OBD_SLAB_FREE(bio_private, biop_cachep, sizeof(*bio_private));
}

static inline bool osd_use_page_cache(struct osd_device *d)
{
        /* do not use pagecache if write and read caching are disabled */
        if (d->od_writethrough_cache + d->od_read_cache == 0)
                return false;
        /* use pagecache by default */
        return true;
}

static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = ktime_set(0, 0);
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

        /* start with 1MB for 4K blocks */
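        /*
         * Worked example (editorial, illustrative only): with 4K blocks
         * and pages = 300, i doubles 256 -> 512, so the buffers below
         * are sized for 512 pages to limit future reallocations.
         */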
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned int)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned int)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_lnb_buf,
                       pages * sizeof(iobuf->dr_lnbs[0]));
        iobuf->dr_lnbs = iobuf->dr_lnb_buf.lb_buf;
        if (unlikely(iobuf->dr_lnbs == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)

static void osd_iobuf_add_page(struct osd_iobuf *iobuf,
                               struct niobuf_local *lnb)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages] = lnb->lnb_page;
        iobuf->dr_lnbs[iobuf->dr_npages] = lnb;
        iobuf->dr_npages++;
}

void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                struct brw_stats *h = &d->od_brw_stats;

                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_R_DIO_FRAGS+rw],
                                      iobuf->dr_frags);
                lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_R_IO_TIME+rw],
                                           ktime_to_ms(iobuf->dr_elapsed));
        }

        iobuf->dr_error = 0;
}


#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
        int error = blk_status_to_errno(bio->bi_status);
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
        struct osd_bio_private *bio_private = bio->bi_private;
        struct osd_iobuf *iobuf = bio_private->obp_iobuf;
        struct bio_vec *bvl;


        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!!
         */
        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL! Dumping the bio contents to the console. Please report this to <https://jira.whamcloud.com/>; this node will probably need to be rebooted.\n");
                CERROR("bi_next: %p, bi_flags: %lx, " __stringify(bi_opf)
                       ": %x, bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, bi_private: %p\n",
                       bio->bi_next, (unsigned long)bio->bi_flags,
                       (unsigned int)bio->bi_opf, bio->bi_vcnt, bio_idx(bio),
                       bio_sectors(bio) << 9, bio->bi_end_io,
                       atomic_read(&bio->__bi_cnt),
                       bio->bi_private);
                return;
        }

        /* the check is outside of the loop for performance reasons -bzzz */
        if (!bio_data_dir(bio)) {
                DECLARE_BVEC_ITER_ALL(iter_all);

                bio_for_each_segment_all(bvl, bio, iter_all) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl_to_page(bvl));
                        LASSERT(PageLocked(bvl_to_page(bvl)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * set dr_elapsed before dr_numreqs drops to 0, otherwise
         * it's possible that a service thread will see dr_numreqs
         * at zero while dr_elapsed is not yet set, leading to lost
         * data in this processing and an assertion in a subsequent
         * call to OSD.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                ktime_t now = ktime_get();

                iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. b=10076
         */
        osd_bio_fini(bio);
}

static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device *osd = iobuf->dr_dev;
        struct brw_stats *h = &osd->od_brw_stats;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_R_DISK_IOSIZE],
                                           size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_W_DISK_IOSIZE],
                                           size);
        } else {
                LBUG();
        }
}

static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
        submit_bio(rw ? WRITE : READ, bio);
#else
        bio->bi_opf |= rw;
        submit_bio(bio);
#endif
}

static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}
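
/*
 * Illustrative note (editorial): a bio whose last sector is 2048 can
 * absorb a new fragment starting at sector 2048; any gap or overlap
 * forces allocation of a new bio.
 */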


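/*
 * Worked example (editorial, illustrative only): with blocks_per_page = 2,
 * start_blocks = 1 and count = 4, blocks 1..4 were written: pg_start = 1,
 * pg_end = (1 + 4 - 2) / 2 = 1, so only page 1, which is fully covered,
 * gets OBD_BRW_DONE; the partially covered pages 0 and 2 must be
 * mapped/written again in the next transaction.
 */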
static void osd_mark_page_io_done(struct osd_iobuf *iobuf,
                                  struct inode *inode,
                                  sector_t start_blocks,
                                  sector_t count)
{
        struct niobuf_local *lnb;
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        pgoff_t pg_start, pg_end;

        pg_start = start_blocks / blocks_per_page;
        if (start_blocks % blocks_per_page)
                pg_start++;
        if (count >= blocks_per_page)
                pg_end = (start_blocks + count -
                          blocks_per_page) / blocks_per_page;
        else
                return; /* nothing to mark */
        for ( ; pg_start <= pg_end; pg_start++) {
                lnb = iobuf->dr_lnbs[pg_start];
                lnb->lnb_flags |= OBD_BRW_DONE;
        }
}

/*
 * Linux v5.12-rc1-20-ga8affc03a9b3
 *  block: rename BIO_MAX_PAGES to BIO_MAX_VECS
 */
#ifndef BIO_MAX_VECS
#define BIO_MAX_VECS    BIO_MAX_PAGES
#endif

static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf, sector_t start_blocks,
                      sector_t count)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page **pages = iobuf->dr_pages;
        int npages = iobuf->dr_npages;
        sector_t *blocks = iobuf->dr_blocks;
        struct super_block *sb = inode->i_sb;
        int sector_bits = sb->s_blocksize_bits - 9;
        unsigned int blocksize = sb->s_blocksize;
        struct block_device *bdev = sb->s_bdev;
        struct bio *bio = NULL;
        int bio_start_page_idx;
        struct page *page;
        unsigned int page_offset;
        sector_t sector;
        int nblocks;
        int block_idx, block_idx_end;
        int page_idx, page_idx_start;
        int i;
        int rc = 0;
        bool fault_inject;
        bool integrity_enabled;
        struct blk_plug plug;
        int blocks_left_page;

        ENTRY;

        fault_inject = CFS_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
        LASSERT(iobuf->dr_npages == npages);

        integrity_enabled = bdev_integrity_enabled(bdev, iobuf->dr_rw);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = ktime_get();

        if (!count)
                count = npages * blocks_per_page;
        block_idx_end = start_blocks + count;

        blk_start_plug(&plug);

        page_idx_start = start_blocks / blocks_per_page;
        for (page_idx = page_idx_start, block_idx = start_blocks;
             block_idx < block_idx_end; page_idx++,
             block_idx += blocks_left_page) {
                /* For cases where the filesystem's blocksize is not the
                 * same as PAGE_SIZE (e.g. ARM with PAGE_SIZE=64KB and
                 * blocksize=4KB), there will be multiple blocks to
                 * read/write per page. Also, the start and end block may
                 * not be aligned to the start and end of the page, so the
                 * first page may skip some blocks at the start ("i != 0",
                 * "blocks_left_page" is reduced), and the last page may
                 * skip some blocks at the end (limited by "count").
                 */
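                /*
                 * Worked example (editorial, illustrative only):
                 * PAGE_SIZE = 64KB and blocksize = 4KB gives
                 * blocks_per_page = 16. Starting at block_idx = 5, the
                 * first iteration uses i = 5, blocks_left_page = 11 and
                 * page_offset = 5 * 4096, i.e. only the tail of page 0.
                 */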
                page = pages[page_idx];
                LASSERT(page_idx < iobuf->dr_npages);

                i = block_idx % blocks_per_page;
                blocks_left_page = blocks_per_page - i;
                if (block_idx + blocks_left_page > block_idx_end)
                        blocks_left_page = block_idx_end - block_idx;
                page_offset = i * blocksize;
                for (i = 0; i < blocks_left_page;
                     i += nblocks, page_offset += blocksize * nblocks) {
                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u,"
                                         "start_blocks: %llu, count: %llu, npages: %d\n",
                                         page_idx, block_idx, i,
                                         (unsigned long long)start_blocks,
                                         (unsigned long long)count, npages);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_left_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                 sector_bits))
                                nblocks++;

                        if (bio && can_be_merged(bio, sector) &&
                            bio_add_page(bio, page, blocksize * nblocks,
                                         page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q = bio_get_queue(bio);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE,
                                       "bio++ sz %d vcnt %d(%d) sectors %d(%d) psg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       osd_bio_nr_segs(bio),
                                       queue_max_segments(q));
                                rc = osd_bio_integrity_handle(osd, bio,
                                        iobuf, bio_start_page_idx,
                                        fault_inject, integrity_enabled);
                                if (rc) {
                                        goto out;
                                }

                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        bio_start_page_idx = page_idx;
                        /* allocate new bio */
                        bio = cfs_bio_alloc(bdev,
                                            min_t(unsigned short, BIO_MAX_VECS,
                                                  (block_idx_end - block_idx +
                                                   blocks_left_page - 1)),
                                            iobuf->dr_rw ? REQ_OP_WRITE
                                                         : REQ_OP_READ,
                                            GFP_NOIO);
                        if (!bio) {
                                CERROR("Can't allocate bio %u pages\n",
                                       block_idx_end - block_idx +
                                       blocks_left_page - 1);
                                rc = -ENOMEM;
                                goto out;
                        }
                        bio_set_sector(bio, sector);
                        rc = osd_bio_init(bio, iobuf, integrity_enabled,
                                          bio_start_page_idx);
                        if (rc)
                                goto out;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                rc = osd_bio_integrity_handle(osd, bio, iobuf,
                                              bio_start_page_idx,
                                              fault_inject,
                                              integrity_enabled);
                if (rc)
                        goto out;

                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

out:
        blk_finish_plug(&plug);

        /* in order to achieve better IO throughput, we don't wait for write
         * completion here. instead we proceed with the transaction commit in
         * parallel and wait for IO completion once the transaction is stopped.
         * see osd_trans_stop() for more details -bzzz
         */
        if (iobuf->dr_rw == 0 || fault_inject)
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);

        if (rc == 0)
                rc = iobuf->dr_error;
        else
                osd_bio_fini(bio);

        if (iobuf->dr_rw == 0 || fault_inject)
                osd_fini_iobuf(osd, iobuf);

        /* Write only now */
        if (rc == 0 && iobuf->dr_rw)
                osd_mark_page_io_done(iobuf, inode,
                                      start_blocks, count);

        RETURN(rc);
}

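/*
 * Worked example (editorial, illustrative only): with PAGE_SIZE = 4096,
 * offset = 1000 and len = 9000 this produces three niobuf_local entries:
 * {off 1000, poff 1000, len 3096}, {off 4096, poff 0, len 4096} and
 * {off 8192, poff 0, len 1808}.
 */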
static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb, int maxlnb)
{
        int rc = 0;
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_SIZE - 1);
                int plen = PAGE_SIZE - poff;

                if (*nrpages >= maxlnb) {
                        rc = -EOVERFLOW;
                        break;
                }

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;
                lnb->lnb_guard_rpc = 0;
                lnb->lnb_guard_disk = 0;
                lnb->lnb_locked = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(rc);
}

static struct page *osd_get_page(const struct lu_env *env, struct dt_object *dt,
                                 loff_t offset, gfp_t gfp_mask, bool cache)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page *page;
        int cur;

        LASSERT(inode);

        if (cache) {
                page = find_or_create_page(inode->i_mapping,
                                           offset >> PAGE_SHIFT, gfp_mask);

                if (likely(page)) {
                        LASSERT(!PagePrivate2(page));
                        wait_on_page_writeback(page);
                } else {
                        lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
                }

                return page;
        }

        if (inode->i_mapping->nrpages) {
                /* consult with pagecache, but do not create new pages */
                /* this is normally used once */
                page = find_lock_page(inode->i_mapping, offset >> PAGE_SHIFT);
                if (page) {
                        wait_on_page_writeback(page);
                        return page;
                }
        }

        LASSERT(oti->oti_dio_pages);
        cur = oti->oti_dio_pages_used;
        page = oti->oti_dio_pages[cur];

        if (unlikely(!page)) {
                LASSERT(cur < PTLRPC_MAX_BRW_PAGES);
                page = alloc_page(gfp_mask);
                if (!page)
                        return NULL;
                oti->oti_dio_pages[cur] = page;
                SetPagePrivate2(page);
                lock_page(page);
        }

        ClearPageUptodate(page);
        page->index = offset >> PAGE_SHIFT;
        oti->oti_dio_pages_used++;

        return page;
}

/*
 * there are following "locks":
 * journal_start
 * i_mutex
 * page lock
 *
 * osd write path:
 *  - lock page(s)
 *  - journal_start
 *  - truncate_sem
 *
 * ext4 vmtruncate:
 *  - lock pages, unlock
 *  - journal_start
 *  - lock partial page
 *  - i_data_sem
 *
 */

/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param lnb           array of pages undergoing IO
 * \param npages        number of pages in \a lnb
 *
 * \retval 0            always
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct pagevec pvec;
        int i;

        ll_pagevec_init(&pvec, 0);

        for (i = 0; i < npages; i++) {
                struct page *page = lnb[i].lnb_page;

                if (page == NULL)
                        continue;

                /* if the page isn't cached, then reset uptodate
                 * to prevent reuse
                 */
                if (PagePrivate2(page)) {
                        oti->oti_dio_pages_used--;
                } else {
                        if (lnb[i].lnb_locked)
                                unlock_page(page);
                        if (pagevec_add(&pvec, page) == 0)
                                pagevec_release(&pvec);
                }

                lnb[i].lnb_page = NULL;
        }

        LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);

        /* Release any partial pagevec */
        pagevec_release(&pvec);

        RETURN(0);
}

/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for same region,
 * but partial-page truncate can leave dirty pages in the cache for ldiskfs.
 * It's possible the writeout on such a page is in progress when we access
 * it. It's also possible that during this writeout we put new (partial) data
 * into the page, but won't be able to proceed in filter_commitrw_write().
 * Therefore, just wait for writeout completion as it should be rare enough.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param pos           byte offset of IO start
 * \param len           number of bytes of IO
 * \param lnb           array of extents undergoing IO
 * \param rw            read or write operation, and other flags
 * \param maxlnb        max number of entries in the \a lnb array
 *
 * \retval pages        (zero or more) loaded successfully
 * \retval -ENOMEM      on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        int maxlnb, enum dt_bufs_type rw)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd   = osd_obj2dev(obj);
        int npages, i, iosize, rc = 0;
        bool cache, write;
        loff_t fsize;
        gfp_t gfp_mask;

        LASSERT(obj->oo_inode);

        if (unlikely(obj->oo_destroyed))
                RETURN(-ENOENT);

        rc = osd_map_remote_to_local(pos, len, &npages, lnb, maxlnb);
        if (rc)
                RETURN(rc);

        write = rw & DT_BUFS_TYPE_WRITE;

        fsize = lnb[npages - 1].lnb_file_offset + lnb[npages - 1].lnb_len;
        iosize = fsize - lnb[0].lnb_file_offset;
        fsize = max(fsize, i_size_read(obj->oo_inode));

        cache = rw & DT_BUFS_TYPE_READAHEAD;
        if (cache)
                goto bypass_checks;

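        /*
         * Note (editorial): the while (cache) / break construct below is
         * a structured goto; each failed check flips "cache" to false
         * and leaves the loop after a single pass.
         */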
        cache = osd_use_page_cache(osd);
        while (cache) {
                if (write) {
                        if (!osd->od_writethrough_cache) {
                                cache = false;
                                break;
                        }
                        if (iosize > osd->od_writethrough_max_iosize) {
                                cache = false;
                                break;
                        }
                } else {
                        if (!osd->od_read_cache) {
                                cache = false;
                                break;
                        }
                        if (iosize > osd->od_readcache_max_iosize) {
                                cache = false;
                                break;
                        }
                }
                /* don't use cache on large files */
                if (osd->od_readcache_max_filesize &&
                    fsize > osd->od_readcache_max_filesize)
                        cache = false;
                break;
        }

bypass_checks:
        if (!cache && unlikely(!oti->oti_dio_pages)) {
                OBD_ALLOC_PTR_ARRAY_LARGE(oti->oti_dio_pages,
                                          PTLRPC_MAX_BRW_PAGES);
                if (!oti->oti_dio_pages)
                        return -ENOMEM;
        }

        /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
        gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
                                             GFP_HIGHUSER;
        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(env, dt, lnb->lnb_file_offset,
                                             gfp_mask, cache);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                lnb->lnb_locked = 1;
                if (cache)
                        mark_page_accessed(lnb->lnb_page);
        }

#if 0
        /* XXX: this version doesn't invalidate cached pages, but use them */
        if (!cache && write && obj->oo_inode->i_mapping->nrpages) {
                /* do not allow data aliasing, invalidate pagecache */
                /* XXX: can be quite expensive in mixed case */
                invalidate_mapping_pages(obj->oo_inode->i_mapping,
                                lnb[0].lnb_file_offset >> PAGE_SHIFT,
                                lnb[npages - 1].lnb_file_offset >> PAGE_SHIFT);
        }
#endif

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}

#ifdef HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS
static int osd_extend_restart_trans(handle_t *handle, int needed,
                                    struct inode *inode)
{
        int rc;

        rc = ldiskfs_journal_ensure_credits(handle, needed,
                ldiskfs_trans_default_revoke_credits(inode->i_sb));
        /* this means journal has been restarted */
        if (rc > 0)
                rc = 0;

        return rc;
}
#else
static int osd_extend_restart_trans(handle_t *handle, int needed,
                                    struct inode *inode)
{
        int rc;

        if (ldiskfs_handle_has_enough_credits(handle, needed))
                return 0;
        rc = ldiskfs_journal_extend(handle,
                                needed - handle->h_buffer_credits);
        if (rc <= 0)
                return rc;

        return ldiskfs_journal_restart(handle, needed);
}
#endif /* HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS */

static int osd_ldiskfs_map_write(struct inode *inode, struct osd_iobuf *iobuf,
                                 struct osd_device *osd, sector_t start_blocks,
                                 sector_t count, loff_t *disk_size,
                                 __u64 user_size)
{
        /* if file has grown, take user_size into account */
        if (user_size && *disk_size > user_size)
                *disk_size = user_size;

        spin_lock(&inode->i_lock);
        if (*disk_size > i_size_read(inode)) {
                i_size_write(inode, *disk_size);
                LDISKFS_I(inode)->i_disksize = *disk_size;
                spin_unlock(&inode->i_lock);
                osd_dirty_inode(inode, I_DIRTY_DATASYNC);
        } else {
                spin_unlock(&inode->i_lock);
        }

        /*
         * We don't do stats here as in read path because
         * write is async: we'll do this in osd_put_bufs()
         */
        return osd_do_bio(osd, inode, iobuf, start_blocks, count);
}

static unsigned int osd_extent_bytes(const struct osd_device *o)
{
        unsigned int *extent_bytes_ptr =
                        raw_cpu_ptr(o->od_extent_bytes_percpu);

        if (likely(*extent_bytes_ptr))
                return *extent_bytes_ptr;

        /* initialize on first access or CPU hotplug */
        if (!ldiskfs_has_feature_extents(osd_sb(o)))
                *extent_bytes_ptr = 1 << osd_sb(o)->s_blocksize_bits;
        else
                *extent_bytes_ptr = OSD_DEFAULT_EXTENT_BYTES;

        return *extent_bytes_ptr;
}

#define EXTENT_BYTES_DECAY 64
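/*
 * Worked example (editorial, illustrative only): with the per-CPU average
 * at 1MB (1048576) and a new 4KB allocation, the decayed value is
 * (1048576 * 63 + 4096 + 63) / 64 = 1032256 bytes, i.e. the average
 * drifts slowly toward the observed allocation size.
 */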
static void osd_decay_extent_bytes(struct osd_device *osd,
                                   unsigned int new_bytes)
{
        unsigned int old_bytes;

        if (!ldiskfs_has_feature_extents(osd_sb(osd)))
                return;

        old_bytes = osd_extent_bytes(osd);
        *raw_cpu_ptr(osd->od_extent_bytes_percpu) =
                (old_bytes * (EXTENT_BYTES_DECAY - 1) +
                 min(new_bytes, OSD_DEFAULT_EXTENT_BYTES) +
                 EXTENT_BYTES_DECAY - 1) / EXTENT_BYTES_DECAY;
}

static int osd_ldiskfs_map_inode_pages(struct inode *inode,
                                       struct osd_iobuf *iobuf,
                                       struct osd_device *osd,
                                       int create, __u64 user_size,
                                       int check_credits,
                                       struct thandle *thandle)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int blocksize = 1 << inode->i_blkbits;
        int rc = 0, i = 0, mapped_index = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;
        handle_t *handle = NULL;
        sector_t start_blocks = 0, count = 0;
        loff_t disk_size = 0;
        struct page **page = iobuf->dr_pages;
        int pages = iobuf->dr_npages;
        sector_t *blocks = iobuf->dr_blocks;
        struct niobuf_local *lnb1, *lnb2;
        loff_t size1, size2;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        if (create) {
                create = LDISKFS_GET_BLOCKS_CREATE;
                handle = ldiskfs_journal_current_handle();
                LASSERT(handle != NULL);
                rc = osd_attach_jinode(inode);
                if (rc)
                        return rc;
                disk_size = i_size_read(inode);
                /* if disk_size is already bigger than specified user_size,
                 * ignore user_size
                 */
                if (disk_size > user_size)
                        user_size = 0;
        }
        /* pages are already sorted, so we just have to find
         * contiguous chunks and process them properly
         */
        while (i < pages) {
                long blen, total = 0, previous_total = 0;
                struct ldiskfs_map_blocks map = { 0 };
                ktime_t time;

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;

                /*
                 * For PAGE_SIZE > blocksize block allocation mapping, the
                 * first ldiskfs_map_blocks() aims at looking up already
                 * mapped blocks, recording them to iobuf->dr_blocks and
                 * fixing up m_lblk, m_len for the un-allocated blocks to
                 * be created/mapped in the second ldiskfs_map_blocks().
                 *
                 * m_lblk should be the first un-allocated block: if m_lblk
                 * points at an already allocated block when create = 1,
                 * ldiskfs_map_blocks() will just return that block and
                 * will not allocate any of the requested new blocks for
                 * the extent. For the PAGE_SIZE = blocksize case, if
                 * m_lblk points at an already allocated block it will
                 * point at an un-allocated block in the next restarted
                 * transaction, because already mapped blocks/pages are
                 * filtered out of the next restarted transaction via the
                 * OBD_BRW_DONE flag in osd_declare_write_commit().
                 */
                if (create && PAGE_SIZE > blocksize) {
                        /* With flags=0 just for already mapped blocks lookup */
                        rc = ldiskfs_map_blocks(handle, inode, &map, 0);
                        if (rc > 0 && map.m_flags & LDISKFS_MAP_MAPPED) {
                                for (; total < blen && total < map.m_len;
                                                total++)
                                        *(blocks + total) = map.m_pblk + total;

                                /* The extent is already fully mapped */
                                if (total == blen) {
                                        rc = 0;
                                        goto ext_already_mapped;
                                }
                        }
                        /*
                         * Fixup or reset m_lblk and m_len for un-mapped blocks.
                         * The second ldiskfs_map_blocks() will create and map
                         * them.
                         */
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                }

cont_map:
                /**
                 * We might restart the transaction for block allocations.
                 * To preserve data ordered mode, the IO submission, disk
                 * size update and block allocations must all be within
                 * the same transaction to guarantee consistency.
                 */
                if (handle && check_credits) {
                        struct osd_thandle *oh;

                        LASSERT(thandle != NULL);
                        oh = container_of(thandle, struct osd_thandle,
                                          ot_super);
                        /*
                         * only issue IO if a transaction restart is
                         * needed; since the disk size update must hold
                         * the inode lock, we want to avoid that as much
                         * as possible.
                         */
                        if (oh->oh_declared_ext <= 0) {
                                rc = osd_ldiskfs_map_write(inode,
                                        iobuf, osd, start_blocks,
                                        count, &disk_size, user_size);
                                if (rc)
                                        GOTO(cleanup, rc);
                                thandle->th_restart_tran = 1;
                                GOTO(cleanup, rc = -EAGAIN);
                        }

                        if (CFS_FAIL_CHECK(OBD_FAIL_OST_RESTART_IO))
                                oh->oh_declared_ext = 0;
                        else
                                oh->oh_declared_ext--;
                }

                time = ktime_get();
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                time = ktime_sub(ktime_get(), time);

                if (rc >= 0) {
                        struct brw_stats *h = &osd->od_brw_stats;
                        int idx, c = 0;

                        idx = map.m_flags & LDISKFS_MAP_NEW ?
                                BRW_ALLOC_TIME : BRW_MAP_TIME;
                        lprocfs_oh_tally_log2_pcpu(&h->bs_hist[idx],
                                                   ktime_to_ms(time));

                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                }
                                if ((map.m_flags & LDISKFS_MAP_UNWRITTEN) &&
                                    !create) {
                                        /* don't try to read allocated but
                                         * unwritten blocks; instead fill
                                         * these with zeros in osd_do_bio() */
                                        *(blocks + total) = 0;
                                        continue;
                                }
                                *(blocks + total) = map.m_pblk + c;
                                /* unmap any possible underlying
                                 * metadata from the block device
                                 * mapping.  b=6998.
                                 */
                                if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                    create)
                                        clean_bdev_aliases(inode->i_sb->s_bdev,
                                                           map.m_pblk + c, 1);
                        }
                        rc = 0;
                }

ext_already_mapped:
                if (rc == 0 && create) {
                        count += (total - previous_total);
                        mapped_index = (count + blocks_per_page -
                                        1) / blocks_per_page - 1;
                        lnb1 = iobuf->dr_lnbs[i - clen];
                        lnb2 = iobuf->dr_lnbs[mapped_index];
                        size1 = lnb1->lnb_file_offset -
                                (lnb1->lnb_file_offset % PAGE_SIZE) +
                                (total << inode->i_blkbits);
                        size2 = lnb2->lnb_file_offset + lnb2->lnb_len;

                        if (size1 > size2)
                                size1 = size2;
                        if (size1 > disk_size)
                                disk_size = size1;
                }

                if (rc == 0 && total < blen) {
                        /*
                         * decay the extent bytes average if we could
                         * not allocate the extent in one pass.
                         */
                        osd_decay_extent_bytes(osd,
                                (total - previous_total) << inode->i_blkbits);
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        previous_total = total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);
                /*
                 * update the decaying extent bytes average if we
                 * managed to allocate a good large extent at once.
                 */
                if (total - previous_total >=
                    osd_extent_bytes(osd) >> inode->i_blkbits)
                        osd_decay_extent_bytes(osd,
                                (total - previous_total) << inode->i_blkbits);
                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        if (rc == 0 && create &&
            start_blocks < pages * blocks_per_page) {
                rc = osd_ldiskfs_map_write(inode, iobuf, osd, start_blocks,
                                           count, &disk_size, user_size);
                LASSERT(start_blocks + count == pages * blocks_per_page);
        }
        return rc;
}

static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        ktime_t start, end;
        s64 timediff;
        ssize_t isize;
        __s64  maxidx;
        int i, rc = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

        start = ktime_get();
        for (i = 0; i < npages; i++) {

                /*
                 * till commit, the content of the page is undefined;
                 * we'll set it uptodate once bulk is done. otherwise
                 * subsequent reads can access non-stable data
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_SIZE)
                        continue;

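                /*
                 * Note (editorial): a partial page past the on-disk EOF
                 * cannot be read back, so instead of a read-modify-write
                 * its head and tail outside the written range are zeroed.
                 */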
                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, &lnb[i]);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
                                                 0, 0, NULL);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf, 0, 0);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}

#ifdef KERNEL_DS
#define DECLARE_MM_SEGMENT_T(name)             mm_segment_t name
#define access_set_kernel(saved_fs, fei)                                \
do {                                                                    \
        saved_fs = get_fs();                                            \
        set_fs(KERNEL_DS);                                              \
} while (0)
#define access_unset_kernel(saved_fs, fei)             set_fs((saved_fs))
#else
#define DECLARE_MM_SEGMENT_T(name)
#define access_set_kernel(saved_fs, fei)                                \
        (fei)->fi_flags |= LDISKFS_FIEMAP_FLAG_MEMCPY
#define access_unset_kernel(saved_fs, fei) \
        (fei)->fi_flags &= ~(LDISKFS_FIEMAP_FLAG_MEMCPY)
#endif /* KERNEL_DS */

static int osd_is_mapped(struct dt_object *dt, __u64 offset,
                         struct ldiskfs_map_blocks *map)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        int mapped;
        sector_t block = osd_i_blocks(inode, offset);
        sector_t end;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if ((i_size_read(inode) - 1) < offset)
                return 0;

        end = map->m_lblk + map->m_len;
        if (block >= map->m_lblk && block < end)
                return map->m_flags & LDISKFS_MAP_MAPPED;

        map->m_lblk = block;
        map->m_len = INT_MAX;

        mapped = ldiskfs_map_blocks(NULL, inode, map, 0);
        if (mapped < 0) {
                map->m_len = 0;
                return 0;
        }

        return map->m_flags & LDISKFS_MAP_MAPPED;
}
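
/*
 * Note (editorial): osd_is_mapped() caches the last ldiskfs_map_blocks()
 * result in *map, so repeated lookups over adjacent offsets only call
 * into ldiskfs once per extent.
 */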

#define MAX_EXTENTS_PER_WRITE 100
static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        int                     extents = 0, new_meta = 0;
        int                     depth, new_blocks = 0;
        int                     i;
        int                     dirty_groups = 0;
        int                     rc = 0;
        int                     credits = 0;
        long long               quota_space = 0;
        struct ldiskfs_map_blocks map;
        enum osd_quota_local_flags local_flags = 0;
        enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
        unsigned int            extent_bytes;
        loff_t extent_start = 0;
        loff_t extent_end = 0;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        /*
         * We track a decaying average of extent bytes per filesystem;
         * most of the time it will be 1MB, but as the filesystem becomes
         * heavily fragmented it can drop as low as 4KB.
         */
        extent_bytes = osd_extent_bytes(osd);
        LASSERT(extent_bytes >= osd_sb(osd)->s_blocksize);

        /* calculate number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX we could handle this on a per-lnb basis as done by
                 * grant.
                 */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & OBD_BRW_SYS_RESOURCE) ||
                    !(lnb[i].lnb_flags & OBD_BRW_SYNC))
                        declare_flags |= OSD_QID_FORCE;

                /*
                 * Converting an unwritten extent might require splitting
                 * extents, so we cannot skip it.
                 */
                if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &map) &&
                    !(map.m_flags & LDISKFS_MAP_UNWRITTEN)) {
                        lnb[i].lnb_flags |= OBD_BRW_MAPPED;
                        continue;
                }

                if (lnb[i].lnb_flags & OBD_BRW_DONE) {
                        lnb[i].lnb_flags |= OBD_BRW_MAPPED;
                        continue;
                }

                /* count only unmapped changes */
                new_blocks++;
                if (lnb[i].lnb_file_offset != extent_end || extent_end == 0) {
                        if (extent_end != 0)
                                extents += (extent_end - extent_start +
                                            extent_bytes - 1) / extent_bytes;
                        extent_start = lnb[i].lnb_file_offset;
                        extent_end = lnb[i].lnb_file_offset + lnb[i].lnb_len;
                } else {
                        extent_end += lnb[i].lnb_len;
                }

                quota_space += PAGE_SIZE;
        }

        credits++; /* inode */
        /*
         * overwrite case: no need to modify the tree or
         * allocate blocks.
         */
        if (!extent_end)
                goto out_declare;

        extents += (extent_end - extent_start +
                    extent_bytes - 1) / extent_bytes;
1355         /**
1356          * As filesystem usage grows, mballoc can no longer scan the
1357          * block groups for the best-aligned free extents, so the average
1358          * bytes per extent may decay to a very small value and cause us
1359          * to reserve far too many credits. Be more optimistic in the
1360          * credit reservation: even when the filesystem is nearly full,
1361          * it is extremely unlikely that the worst case would ever be
1362          * hit.
1363          */
1364         if (extents > MAX_EXTENTS_PER_WRITE)
1365                 extents = MAX_EXTENTS_PER_WRITE;
1366
1367         /**
1368          * If we add a single extent, then in the worst case each tree
1369          * level index/leaf needs to be changed if the tree splits.
1370          * If more extents are inserted, they could cause the whole tree
1371          * to split more than once, but this is really rare.
1372          */
1373         if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
1374                 /*
1375                  * Many concurrent threads may grow the tree by the time
1376                  * our transaction starts, so take 2 as the minimum depth.
1377                  */
1378                 depth = ext_depth(inode);
1379                 depth = min(max(depth, 1) + 1, LDISKFS_MAX_EXTENT_DEPTH);
1380                 if (extents <= 1) {
1381                         credits += depth * 2 * extents;
1382                         new_meta = depth;
1383                 } else {
1384                         credits += depth * 3 * extents;
1385                         new_meta = depth * 2 * extents;
1386                 }
1387         } else {
1388                 /*
1389                  * With N contiguous data blocks, we need at most
1390                  * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
1391                  * 2 dindirect blocks, and 1 tindirect block
1392                  */
1393                 new_meta = DIV_ROUND_UP(new_blocks,
1394                                 LDISKFS_ADDR_PER_BLOCK(inode->i_sb)) + 4;
1395                 credits += new_meta;
1396         }
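
        /*
         * Worked example (illustrative only): an extent-mapped inode
         * with on-disk tree depth 2 and extents = 4 gives
         * depth = min(2 + 1, LDISKFS_MAX_EXTENT_DEPTH) = 3, so credits
         * grows by 3 * 3 * 4 = 36 and new_meta = 3 * 2 * 4 = 24.
         * For a block-mapped inode with new_blocks = 300 on a 4KB-block
         * filesystem (LDISKFS_ADDR_PER_BLOCK = 1024), instead
         * new_meta = DIV_ROUND_UP(300, 1024) + 4 = 5.
         */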
1397         dirty_groups += (extents + new_meta);
1398
1399         oh->oh_declared_ext = extents;
1400
1401         /* quota space for metadata blocks */
1402         quota_space += new_meta * LDISKFS_BLOCK_SIZE(osd_sb(osd));
1403
1404         /* quota space should be reported in 1K blocks */
1405         quota_space = toqb(quota_space);
1406
1407         /* each new block can go into a different group (bitmap + gd) */
1408
1409         /* we can't dirty more bitmap blocks than exist */
1410         if (dirty_groups > LDISKFS_SB(osd_sb(osd))->s_groups_count)
1411                 credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
1412         else
1413                 credits += dirty_groups;
1414
1415         /* we can't dirty more gd blocks than exist */
1416         if (dirty_groups > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
1417                 credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
1418         else
1419                 credits += dirty_groups;
1420
1421         CDEBUG(D_INODE,
1422                "%s: inode #%lu extent_bytes %u extents %d credits %d\n",
1423                osd_ino2name(inode), inode->i_ino, extent_bytes, extents,
1424                credits);
1425
1426 out_declare:
1427         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1428
1429         /* make sure the over-quota flags are cleared before the check */
1430         lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;
1431
1432         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1433                                    i_projid_read(inode), quota_space, oh,
1434                                    osd_dt_obj(dt), &local_flags, declare_flags);
1435
1436         /* for now we only need to store the over-quota flags in the
1437          * first lnb; once we support BRW to multiple objects, this code
1438          * needs to be revised.
1439          */
1440         if (local_flags & QUOTA_FL_OVER_USRQUOTA)
1441                 lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
1442         if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
1443                 lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
1444         if (local_flags & QUOTA_FL_OVER_PRJQUOTA)
1445                 lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;
1446         if (local_flags & QUOTA_FL_ROOT_PRJQUOTA)
1447                 lnb[0].lnb_flags |= OBD_BRW_ROOT_PRJQUOTA;
1448
1449         if (rc == 0)
1450                 rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);
1451
1452         RETURN(rc);
1453 }
1454
1455 /* write out the pages declared in osd_declare_write_commit() */
1456 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
1457                             struct niobuf_local *lnb, int npages,
1458                             struct thandle *thandle, __u64 user_size)
1459 {
1460         struct osd_thread_info *oti = osd_oti_get(env);
1461         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1462         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1463         struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
1464         int rc = 0, i, check_credits = 0;
1465
1466         LASSERT(inode);
1467
1468         rc = osd_init_iobuf(osd, iobuf, 1, npages);
1469         if (unlikely(rc != 0))
1470                 RETURN(rc);
1471
1472         dquot_initialize(inode);
1473
1474         for (i = 0; i < npages; i++) {
1475                 if (lnb[i].lnb_rc == -ENOSPC &&
1476                     (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
1477                         /* Allow the write to proceed if overwriting an
1478                          * existing block
1479                          */
1480                         lnb[i].lnb_rc = 0;
1481                 }
1482
1483                 if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
1484                         CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
1485                                lnb[i].lnb_rc);
1486                         LASSERT(lnb[i].lnb_page);
1487                         generic_error_remove_page(inode->i_mapping,
1488                                                   lnb[i].lnb_page);
1489                         continue;
1490                 }
1491
1492                 if (lnb[i].lnb_flags & OBD_BRW_DONE)
1493                         continue;
1494
1495                 if (!(lnb[i].lnb_flags & OBD_BRW_MAPPED))
1496                         check_credits = 1;
1497
1498                 LASSERT(PageLocked(lnb[i].lnb_page));
1499                 LASSERT(!PageWriteback(lnb[i].lnb_page));
1500
1501                 /*
1502                  * Since write and truncate are serialized by oo_sem, even
1503                  * partial-page truncate should not leave dirty pages in the
1504                  * page cache.
1505                  */
1506                 LASSERT(!PageDirty(lnb[i].lnb_page));
1507
1508                 SetPageUptodate(lnb[i].lnb_page);
1509
1510                 osd_iobuf_add_page(iobuf, &lnb[i]);
1511         }
1512
1513         osd_trans_exec_op(env, thandle, OSD_OT_WRITE);
1514
1515         if (CFS_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
1516                 rc = -ENOSPC;
1517         } else if (iobuf->dr_npages > 0) {
1518                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd,
1519                                                  1, user_size,
1520                                                  check_credits,
1521                                                  thandle);
1522         } else {
1523                 /* no pages to write, no transno is needed */
1524                 thandle->th_local = 1;
1525         }
1526
1527         if (rc != 0 && !thandle->th_restart_tran)
1528                 osd_fini_iobuf(osd, iobuf);
1529
1530         osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
1531
1532         if (unlikely(rc != 0 && !thandle->th_restart_tran)) {
1533                 /* if write fails, we should drop pages from the cache */
1534                 for (i = 0; i < npages; i++) {
1535                         if (lnb[i].lnb_page == NULL)
1536                                 continue;
1537                         if (!PagePrivate2(lnb[i].lnb_page)) {
1538                                 LASSERT(PageLocked(lnb[i].lnb_page));
1539                                 generic_error_remove_page(inode->i_mapping,
1540                                                           lnb[i].lnb_page);
1541                         }
1542                 }
1543         }
1544
1545         RETURN(rc);
1546 }
1547
1548 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1549                          struct niobuf_local *lnb, int npages)
1550 {
1551         struct osd_thread_info *oti = osd_oti_get(env);
1552         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1553         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1554         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1555         int rc = 0, i, cache_hits = 0, cache_misses = 0;
1556         ktime_t start, end;
1557         s64 timediff;
1558         loff_t isize;
1559
1560         LASSERT(inode);
1561
1562         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1563         if (unlikely(rc != 0))
1564                 RETURN(rc);
1565
1566         isize = i_size_read(inode);
1567
1568         start = ktime_get();
1569         for (i = 0; i < npages; i++) {
1570
1571                 if (isize <= lnb[i].lnb_file_offset)
1572                         /* If there's no more data, abort early.
1573                          * lnb->lnb_rc == 0, so it's easy to detect later.
1574                          */
1575                         break;
1576
1577                 /* instead of checking whether we go beyond isize, send
1578                  * complete pages all the time
1579                  */
1580                 lnb[i].lnb_rc = lnb[i].lnb_len;
1581
1582                 /* Bypass disk read if fail_loc is set properly */
1583                 if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_OST_FAKE_RW))
1584                         SetPageUptodate(lnb[i].lnb_page);
1585
1586                 if (PageUptodate(lnb[i].lnb_page)) {
1587                         cache_hits++;
1588                         unlock_page(lnb[i].lnb_page);
1589                 } else {
1590                         cache_misses++;
1591                         osd_iobuf_add_page(iobuf, &lnb[i]);
1592                 }
1593                 /* no need to unlock in osd_bufs_put(): the sooner a page
1594                  * is unlocked, the earlier another client can access it.
1595                  * note the real unlock_page() may be called a few lines
1596                  * below, after osd_do_bio(). lnb is per-thread, so it is
1597                  * fine for PG_locked and lnb_locked to disagree here
1598                  */
1599                 lnb[i].lnb_locked = 0;
1600         }
1601         end = ktime_get();
1602         timediff = ktime_us_delta(end, start);
1603         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1604
1605         if (cache_hits != 0)
1606                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1607                                     cache_hits);
1608         if (cache_misses != 0)
1609                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1610                                     cache_misses);
1611         if (cache_hits + cache_misses != 0)
1612                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1613                                     cache_hits + cache_misses);
1614
1615         if (iobuf->dr_npages) {
1616                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
1617                                                  0, 0, NULL);
1618                 if (!rc)
1619                         rc = osd_do_bio(osd, inode, iobuf, 0, 0);
1620
1621                 /* IO stats will be done in osd_bufs_put() */
1622
1623                 /* early release to let others read data during the bulk */
1624                 for (i = 0; i < iobuf->dr_npages; i++) {
1625                         LASSERT(PageLocked(iobuf->dr_pages[i]));
1626                         if (!PagePrivate2(iobuf->dr_pages[i]))
1627                                 unlock_page(iobuf->dr_pages[i]);
1628                 }
1629         }
1630
1631         RETURN(rc);
1632 }
1633
1634 /*
1635  * XXX: Another layering violation for now.
1636  *
1637  * We don't want to use ->f_op->read/write methods: the generic file write
1638  *
1639  *         - serializes on ->i_sem, and
1640  *
1641  *         - does a lot of extra work like balance_dirty_pages(),
1642  *
1643  * which doesn't work for globally shared files like /last_rcvd.
1644  */
1645 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1646 {
1647         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1648
1649         memcpy(buffer, (char *)ei->i_data, buflen);
1650
1651         return  buflen;
1652 }
1653
1654 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1655 {
1656         struct buffer_head *bh;
1657         unsigned long block;
1658         int osize;
1659         int blocksize;
1660         int csize;
1661         int boffs;
1662
1663         /* prevent reading after eof */
1664         spin_lock(&inode->i_lock);
1665         if (i_size_read(inode) < *offs + size) {
1666                 loff_t diff = i_size_read(inode) - *offs;
1667
1668                 spin_unlock(&inode->i_lock);
1669                 if (diff < 0) {
1670                         CDEBUG(D_OTHER,
1671                                "size %llu is too short to read @%llu\n",
1672                                i_size_read(inode), *offs);
1673                         return -EBADR;
1674                 } else if (diff == 0) {
1675                         return 0;
1676                 } else {
1677                         size = diff;
1678                 }
1679         } else {
1680                 spin_unlock(&inode->i_lock);
1681         }
1682
1683         blocksize = 1 << inode->i_blkbits;
1684         osize = size;
1685         while (size > 0) {
1686                 block = *offs >> inode->i_blkbits;
1687                 boffs = *offs & (blocksize - 1);
1688                 csize = min(blocksize - boffs, size);
1689                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1690                 if (IS_ERR(bh)) {
1691                         CERROR("%s: can't read %u@%llu on ino %lu: rc = %ld\n",
1692                                osd_ino2name(inode), csize, *offs, inode->i_ino,
1693                                PTR_ERR(bh));
1694                         return PTR_ERR(bh);
1695                 }
1696
1697                 if (bh != NULL) {
1698                         memcpy(buf, bh->b_data + boffs, csize);
1699                         brelse(bh);
1700                 } else {
1701                         memset(buf, 0, csize);
1702                 }
1703
1704                 *offs += csize;
1705                 buf += csize;
1706                 size -= csize;
1707         }
1708         return osize;
1709 }
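
/*
 * Usage sketch (illustrative only; the record type and index are
 * assumptions for the example): callers read fixed-size records from
 * internal objects, advancing the offset on success:
 *
 *      struct lsd_client_data lcd;
 *      loff_t off = idx * sizeof(lcd);
 *      int rc = osd_ldiskfs_read(inode, &lcd, sizeof(lcd), &off);
 *      if (rc == sizeof(lcd))
 *              ...  (full record read, off now points past it)
 *
 * Note the EOF handling above: a read starting exactly at i_size
 * returns 0, one starting past i_size returns -EBADR, and a read
 * crossing i_size is silently shortened.
 */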
1710
1711 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1712                         struct lu_buf *buf, loff_t *pos)
1713 {
1714         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1715         int rc;
1716
1717         /* Read small symlink from inode body as we need to maintain correct
1718          * on-disk symlinks for ldiskfs.
1719          */
1720         if (S_ISLNK(dt->do_lu.lo_header->loh_attr)) {
1721                 loff_t size = i_size_read(inode);
1722
1723                 if (buf->lb_len < size)
1724                         return -EOVERFLOW;
1725
1726                 if (size < sizeof(LDISKFS_I(inode)->i_data))
1727                         rc = osd_ldiskfs_readlink(inode, buf->lb_buf, size);
1728                 else
1729                         rc = osd_ldiskfs_read(inode, buf->lb_buf, size, pos);
1730         } else {
1731                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1732         }
1733
1734         return rc;
1735 }
1736
1737 static inline int osd_extents_enabled(struct super_block *sb,
1738                                       struct inode *inode)
1739 {
1740         if (inode != NULL) {
1741                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1742                         return 1;
1743         } else if (ldiskfs_has_feature_extents(sb)) {
1744                 return 1;
1745         }
1746         return 0;
1747 }
1748
1749 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1750                            const loff_t size, const loff_t pos,
1751                            const int blocks)
1752 {
1753         int credits, bits, bs, i;
1754
1755         bits = sb->s_blocksize_bits;
1756         bs = 1 << bits;
1757
1758         /* legacy block map: 3 levels * 3 (bitmap, gd, the block itself);
1759          * we do not expect block maps on large files, so shrink it to
1760          * 2 levels (enough for 4GB files)
1761          */
1762
1763         /* this is default reservation: 2 levels */
1764         credits = (blocks + 2) * 3;
1765
1766         /* actual offset is unknown, hard to optimize */
1767         if (pos == -1)
1768                 return credits;
1769
1770         /* now check a few specific cases we can optimize */
1771         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1772                 /* no indirects */
1773                 credits = blocks;
1774                 /* allocate if not allocated */
1775                 if (inode == NULL) {
1776                         credits += blocks * 2;
1777                         return credits;
1778                 }
1779                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1780                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1781                         if (LDISKFS_I(inode)->i_data[i] == 0)
1782                                 credits += 2;
1783                 }
1784         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1785                 /* single indirect */
1786                 credits = blocks * 3;
1787                 if (inode == NULL ||
1788                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1789                         credits += 3;
1790                 else
1791                         /* The indirect block may be modified. */
1792                         credits += 1;
1793         }
1794
1795         return credits;
1796 }
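
/*
 * Worked example (illustrative only): with 4KB blocks (bits = 12), an
 * existing inode, and a 2-block write at pos = 0, the write fits in the
 * direct blocks, so credits = 2 plus 2 more for each of i_data[0..1]
 * still unallocated, i.e. 2..6 credits instead of the default
 * (2 + 2) * 3 = 12 reservation.
 */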
1797
1798 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1799                                  const struct lu_buf *buf, loff_t _pos,
1800                                  struct thandle *handle)
1801 {
1802         struct osd_object  *obj  = osd_dt_obj(dt);
1803         struct inode       *inode = obj->oo_inode;
1804         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1805         struct osd_thandle *oh;
1806         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1807         int                 bits, bs;
1808         int                 depth, size;
1809         loff_t              pos;
1810         ENTRY;
1811
1812         LASSERT(buf != NULL);
1813         LASSERT(handle != NULL);
1814
1815         oh = container_of(handle, struct osd_thandle, ot_super);
1816         LASSERT(oh->ot_handle == NULL);
1817
1818         size = buf->lb_len;
1819         bits = sb->s_blocksize_bits;
1820         bs = 1 << bits;
1821
1822         if (osd_tx_was_declared(env, oh, dt, DTO_WRITE_BASE, _pos))
1823                 RETURN(0);
1824
1825         if (_pos == -1) {
1826                 /* if this is an append, then we should
1827                  * expect a cross-block record
1828                  */
1829                 pos = 0;
1830         } else {
1831                 pos = _pos;
1832         }
1833
1834         /* blocks to modify */
1835         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1836         LASSERT(blocks > 0);
1837
1838         if (inode != NULL && _pos != -1) {
1839                 /* object size in blocks */
1840                 est = (i_size_read(inode) + bs - 1) >> bits;
1841                 allocated = inode->i_blocks >> (bits - 9);
1842                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1843                         /* looks like an overwrite, no need to modify tree */
1844                         credits = blocks;
1845                         /* no need to modify i_size */
1846                         goto out;
1847                 }
1848         }
1849
1850         if (osd_extents_enabled(sb, inode)) {
1851                 /*
1852                  * many concurrent threads may grow the tree by the time
1853                  * our transaction starts, so take 2 as the minimum depth.
1854                  * for every level we may need to allocate a new block
1855                  * and move some entries from the old one: 3 blocks to
1856                  * allocate (bitmap, gd, the block itself) + the old
1857                  * block = 4 per level.
1858                  */
1859                 depth = inode != NULL ? ext_depth(inode) : 0;
1860                 depth = min(max(depth, 1) + 3, LDISKFS_MAX_EXTENT_DEPTH);
1861                 credits = depth;
1862                 /* if not append, then split may need to modify
1863                  * existing blocks moving entries into the new ones
1864                  */
1865                 if (_pos != -1)
1866                         credits += depth;
1867                 /* blocks to store data: bitmap, gd, the block itself */
1868                 credits += blocks * 3;
1869         } else {
1870                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1871         }
1872         /* if the inode is created as part of this transaction, then
1873          * its update is already counted by the creation method
1874          */
1875         if (inode != NULL)
1876                 credits++;
1877
1878 out:
1879
1880         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1881
1882         /* dt_declare_write() is usually called for system objects, such
1883          * as llog or last_rcvd files. We needn't enforce quota on those
1884          * objects, so always set the lqi_space as 0.
1885          */
1886         if (inode != NULL)
1887                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1888                                            i_gid_read(inode),
1889                                            i_projid_read(inode), 0,
1890                                            oh, obj, NULL, OSD_QID_BLK);
1891
1892         if (rc == 0)
1893                 rc = osd_trunc_lock(obj, oh, true);
1894
1895         RETURN(rc);
1896 }
1897
1898 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1899 {
1900         /* LU-2634: clear the extent format for fast symlink */
1901         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1902
1903         /* Copying the NUL byte terminating the link target as well */
1904         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen + 1);
1905         spin_lock(&inode->i_lock);
1906         LDISKFS_I(inode)->i_disksize = buflen;
1907         i_size_write(inode, buflen);
1908         spin_unlock(&inode->i_lock);
1909         osd_dirty_inode(inode, I_DIRTY_DATASYNC);
1910
1911         return 0;
1912 }
1913
1914 static int osd_ldiskfs_write_record(struct dt_object *dt, void *buf,
1915                                     int bufsize, int write_NUL, loff_t *offs,
1916                                     handle_t *handle)
1917 {
1918         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1919         struct buffer_head *bh        = NULL;
1920         loff_t              offset    = *offs;
1921         loff_t              new_size  = i_size_read(inode);
1922         unsigned long       block;
1923         int                 blocksize = 1 << inode->i_blkbits;
1924         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1925         int                 err = 0;
1926         int                 size;
1927         int                 boffs;
1928         int                 dirty_inode = 0;
1929         bool create, sparse, sync = false;
1930
1931         if (write_NUL) {
1932                 /*
1933                  * a long symlink write does not count the NUL terminator
1934                  * in bufsize, but we write it out; the inode's file size
1935                  * does not count the NUL terminator either.
1936                  */
1937                 ((char *)buf)[bufsize] = '\0';
1938                 ++bufsize;
1939         }
1940
1941         /* only the first flag-set matters */
1942         dirty_inode = !test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA,
1943                                        &ei->i_flags);
1944
1945         /* sparse checking is racy, but sparse files are very rare, leave as is */
1946         sparse = (new_size > 0 && (inode->i_blocks >> (inode->i_blkbits - 9)) <
1947                   ((new_size - 1) >> inode->i_blkbits) + 1);
1948
1949         while (bufsize > 0) {
1950                 int credits = handle->h_buffer_credits;
1951                 unsigned long last_block = (new_size == 0) ? 0 :
1952                                            (new_size - 1) >> inode->i_blkbits;
1953
1954                 if (bh)
1955                         brelse(bh);
1956
1957                 block = offset >> inode->i_blkbits;
1958                 boffs = offset & (blocksize - 1);
1959                 size = min(blocksize - boffs, bufsize);
1960                 sync = (block > last_block || new_size == 0 || sparse);
1961
1962                 if (sync)
1963                         down(&ei->i_append_sem);
1964
1965                 bh = __ldiskfs_bread(handle, inode, block, 0);
1966
1967                 if (unlikely(IS_ERR_OR_NULL(bh) && !sync))
1968                         CWARN(
1969                               "%s: adding bh without locking off %llu (block %lu, size %d, offs %llu)\n",
1970                               osd_ino2name(inode),
1971                               offset, block, bufsize, *offs);
1972
1973                 if (IS_ERR_OR_NULL(bh)) {
1974                         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1975                         int flags = LDISKFS_GET_BLOCKS_CREATE;
1976
1977                         /* while the file system is being mounted, avoid
1978                          * preallocation, otherwise mount can take a long
1979                          * time as the mballoc cache is cold.
1980                          * XXX: this is a workaround until we have a proper
1981                          *      fix in mballoc
1982                          * XXX: works with extent-based files only */
1983                         if (!osd->od_cl_seq)
1984                                 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
1985                         bh = __ldiskfs_bread(handle, inode, block, flags);
1986                         create = true;
1987                 } else {
1988                         if (sync) {
1989                                 up(&ei->i_append_sem);
1990                                 sync = false;
1991                         }
1992                         create = false;
1993                 }
1994                 if (IS_ERR_OR_NULL(bh)) {
1995                         if (bh == NULL) {
1996                                 err = -EIO;
1997                         } else {
1998                                 err = PTR_ERR(bh);
1999                                 bh = NULL;
2000                         }
2001
2002                         CERROR(
2003                                "%s: error reading offset %llu (block %lu, size %d, offs %llu), credits %d/%d: rc = %d\n",
2004                                osd_ino2name(inode), offset, block, bufsize,
2005                                *offs, credits, handle->h_buffer_credits, err);
2006                         break;
2007                 }
2008
2009                 err = osd_ldiskfs_journal_get_write_access(handle, inode->i_sb,
2010                                                            bh,
2011                                                            LDISKFS_JTR_NONE);
2012                 if (err) {
2013                         CERROR("journal_get_write_access() returned error %d\n",
2014                                err);
2015                         break;
2016                 }
2017                 LASSERTF(boffs + size <= bh->b_size,
2018                          "boffs %d size %d bh->b_size %lu\n",
2019                          boffs, size, (unsigned long)bh->b_size);
2020                 if (create) {
2021                         memset(bh->b_data, 0, bh->b_size);
2022                         if (sync) {
2023                                 up(&ei->i_append_sem);
2024                                 sync = false;
2025                         }
2026                 }
2027                 memcpy(bh->b_data + boffs, buf, size);
2028                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2029                 if (err)
2030                         break;
2031
2032                 if (offset + size > new_size)
2033                         new_size = offset + size;
2034                 offset += size;
2035                 bufsize -= size;
2036                 buf += size;
2037         }
2038         if (sync)
2039                 up(&ei->i_append_sem);
2040
2041         if (bh)
2042                 brelse(bh);
2043
2044         if (write_NUL)
2045                 --new_size;
2046         /* correct in-core and on-disk sizes */
2047         if (new_size > i_size_read(inode)) {
2048                 spin_lock(&inode->i_lock);
2049                 if (new_size > i_size_read(inode))
2050                         i_size_write(inode, new_size);
2051                 if (i_size_read(inode) > ei->i_disksize) {
2052                         ei->i_disksize = i_size_read(inode);
2053                         dirty_inode = 1;
2054                 }
2055                 spin_unlock(&inode->i_lock);
2056         }
2057         if (dirty_inode)
2058                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
2059
2060         if (err == 0)
2061                 *offs = offset;
2062         return err;
2063 }
2064
2065 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
2066                          const struct lu_buf *buf, loff_t *pos,
2067                          struct thandle *handle)
2068 {
2069         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
2070         struct osd_thandle      *oh;
2071         ssize_t                 result;
2072         int                     is_link;
2073
2074         LASSERT(dt_object_exists(dt));
2075
2076         LASSERT(handle != NULL);
2077         LASSERT(inode != NULL);
2078         dquot_initialize(inode);
2079
2080         /* XXX: don't check: one declared chunk can be used many times */
2081         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
2082
2083         oh = container_of(handle, struct osd_thandle, ot_super);
2084         LASSERT(oh->ot_handle->h_transaction != NULL);
2085         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
2086
2087         /* Write a small symlink into the inode body, as we need to
2088          * maintain correct on-disk symlinks for ldiskfs.
2089          * Note: buf->lb_buf contains a NUL terminator while buf->lb_len
2090          * does not include it.
2091          */
2092         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
2093         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
2094                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
2095         else
2096                 result = osd_ldiskfs_write_record(dt, buf->lb_buf, buf->lb_len,
2097                                                   is_link, pos, oh->ot_handle);
2098         if (result == 0)
2099                 result = buf->lb_len;
2100
2101         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
2102
2103         return result;
2104 }
2105
2106 static int osd_declare_fallocate(const struct lu_env *env,
2107                                  struct dt_object *dt, __u64 start, __u64 end,
2108                                  int mode, struct thandle *th)
2109 {
2110         struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
2111         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2112         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2113         long long quota_space = 0;
2114         /* 5 is max tree depth. (inode + 4 index blocks) */
2115         int depth = 5;
2116         int rc;
2117
2118         ENTRY;
2119
2120         /*
2121          * mode == 0 (standard preallocation) and PUNCH are supported;
2122          * the rest of the mode options are not supported yet.
2123          */
2124         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2125                 RETURN(-EOPNOTSUPP);
2126
2127         /* disable fallocate completely */
2128         if (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks < 0)
2129                 RETURN(-EOPNOTSUPP);
2130
2131         LASSERT(th);
2132         LASSERT(inode);
2133
2134         if (mode & FALLOC_FL_PUNCH_HOLE) {
2135                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
2136                                            i_gid_read(inode),
2137                                            i_projid_read(inode), 0, oh,
2138                                            osd_dt_obj(dt), NULL, OSD_QID_BLK);
2139                 if (rc == 0)
2140                         rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
2141                 RETURN(rc);
2142         }
2143
2144         /* quota space for metadata blocks;
2145          * an approximate estimate should be good enough.
2146          */
2147         quota_space += PAGE_SIZE;
2148         quota_space += depth * LDISKFS_BLOCK_SIZE(osd_sb(osd));
2149
2150         /* quota space should be reported in 1K blocks */
2151         quota_space = toqb(quota_space) + toqb(end - start) +
2152                       LDISKFS_META_TRANS_BLOCKS(inode->i_sb);
2153
2154         /* We don't need to reserve credits for the whole fallocate here;
2155          * we reserve space only for metadata. Fallocate credits are
2156          * extended later as required.
2157          */
2158         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2159                                    i_projid_read(inode), quota_space, oh,
2160                                    osd_dt_obj(dt), NULL, OSD_QID_BLK);
2161         RETURN(rc);
2162 }
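
/*
 * Illustrative estimate (assumptions: 4KB blocks, 4KB pages, a 1MB
 * preallocation): quota_space = PAGE_SIZE + 5 * 4096 = 24KB, so after
 * conversion quota_space = toqb(24KB) + toqb(1MB) +
 * LDISKFS_META_TRANS_BLOCKS(sb) = 24 + 1024 + a superblock-dependent
 * constant, i.e. roughly the data size in 1KB blocks plus a small
 * metadata cushion; journal credits are extended later in the
 * fallocate path itself.
 */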
2163
2164 static int osd_fallocate_preallocate(const struct lu_env *env,
2165                                      struct dt_object *dt,
2166                                      __u64 start, __u64 end, int mode,
2167                                      struct thandle *th)
2168 {
2169         struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
2170         handle_t *handle = ldiskfs_journal_current_handle();
2171         unsigned int save_credits = oh->ot_credits;
2172         struct osd_object *obj = osd_dt_obj(dt);
2173         struct inode *inode = obj->oo_inode;
2174         struct ldiskfs_map_blocks map;
2175         unsigned int credits;
2176         ldiskfs_lblk_t blen;
2177         ldiskfs_lblk_t boff;
2178         loff_t new_size = 0;
2179         int depth = 0;
2180         int flags;
2181         int rc = 0;
2182
2183         ENTRY;
2184
2185         LASSERT(dt_object_exists(dt));
2186         LASSERT(osd_invariant(obj));
2187         LASSERT(inode != NULL);
2188
2189         CDEBUG(D_INODE, "fallocate: inode #%lu: start %llu end %llu mode %d\n",
2190                inode->i_ino, start, end, mode);
2191
2192         dquot_initialize(inode);
2193
2194         LASSERT(th);
2195
2196         boff = osd_i_blocks(inode, start);
2197         blen = osd_i_blocks(inode, ALIGN(end, 1 << inode->i_blkbits)) - boff;
2198
2199         /* Create and mark new extents as either zero or unwritten */
2200         flags = (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks ||
2201                  !ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)) ?
2202                 LDISKFS_GET_BLOCKS_CREATE_ZERO :
2203                 LDISKFS_GET_BLOCKS_CREATE_UNWRIT_EXT;
2204 #ifdef LDISKFS_GET_BLOCKS_KEEP_SIZE
2205         if (mode & FALLOC_FL_KEEP_SIZE)
2206                 flags |= LDISKFS_GET_BLOCKS_KEEP_SIZE;
2207 #endif
2208         inode_lock(inode);
2209
2210         if (!(mode & FALLOC_FL_KEEP_SIZE) && (end > i_size_read(inode) ||
2211             end > LDISKFS_I(inode)->i_disksize)) {
2212                 new_size = end;
2213                 rc = inode_newsize_ok(inode, new_size);
2214                 if (rc)
2215                         GOTO(out, rc);
2216         }
2217
2218         inode_dio_wait(inode);
2219
2220         map.m_lblk = boff;
2221         map.m_len = blen;
2222
2223         /* Don't normalize the request if it can fit in one extent so
2224          * that it doesn't get unnecessarily split into multiple extents.
2225          */
2226         if (blen <= EXT_UNWRITTEN_MAX_LEN)
2227                 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
2228
2229         /*
2230          * credits to insert 1 extent into extent tree.
2231          */
2232         credits = ldiskfs_chunk_trans_blocks(inode, blen);
2233         depth = ext_depth(inode);
2234
2235         while (rc >= 0 && blen) {
2236                 loff_t epos;
2237
2238                 /*
2239                  * Recalculate credits when extent tree depth changes.
2240                  */
2241                 if (depth != ext_depth(inode)) {
2242                         credits = ldiskfs_chunk_trans_blocks(inode, blen);
2243                         depth = ext_depth(inode);
2244                 }
2245
2246                 /* TODO: quota check */
2247                 rc = osd_extend_restart_trans(handle, credits, inode);
2248                 if (rc)
2249                         break;
2250
2251                 rc = ldiskfs_map_blocks(handle, inode, &map, flags);
2252                 if (rc <= 0) {
2253                         CDEBUG(D_INODE,
2254                                "inode #%lu: block %u: len %u: ldiskfs_map_blocks returned %d\n",
2255                                inode->i_ino, map.m_lblk, map.m_len, rc);
2256                         ldiskfs_mark_inode_dirty(handle, inode);
2257                         break;
2258                 }
2259
2260                 map.m_lblk += rc;
2261                 map.m_len = blen = blen - rc;
2262                 epos = (loff_t)map.m_lblk << inode->i_blkbits;
2263                 inode->i_ctime = current_time(inode);
2264                 if (new_size) {
2265                         if (epos > end)
2266                                 epos = end;
2267                         if (ldiskfs_update_inode_size(inode, epos) & 0x1)
2268                                 inode->i_mtime = inode->i_ctime;
2269 #ifdef LDISKFS_EOFBLOCKS_FL
2270                 } else {
2271                         if (epos > inode->i_size)
2272                                 ldiskfs_set_inode_flag(inode,
2273                                                        LDISKFS_INODE_EOFBLOCKS);
2274 #endif
2275                 }
2276
2277                 ldiskfs_mark_inode_dirty(handle, inode);
2278         }
2279
2280 out:
2281         /* extend credits if needed for operations such as attribute set */
2282         if (rc >= 0)
2283                 rc = osd_extend_restart_trans(handle, save_credits, inode);
2284
2285         inode_unlock(inode);
2286
2287         RETURN(rc);
2288 }
2289
2290 static int osd_fallocate_punch(const struct lu_env *env, struct dt_object *dt,
2291                                __u64 start, __u64 end, int mode,
2292                                struct thandle *th)
2293 {
2294         struct osd_object *obj = osd_dt_obj(dt);
2295         struct inode *inode = obj->oo_inode;
2296         struct osd_access_lock *al;
2297         struct osd_thandle *oh;
2298         int rc = 0, found = 0;
2299
2300         ENTRY;
2301
2302         LASSERT(dt_object_exists(dt));
2303         LASSERT(osd_invariant(obj));
2304         LASSERT(inode != NULL);
2305
2306         dquot_initialize(inode);
2307
2308         LASSERT(th);
2309         oh = container_of(th, struct osd_thandle, ot_super);
2310         LASSERT(oh->ot_handle->h_transaction != NULL);
2311
2312         list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2313                 if (obj != al->tl_obj)
2314                         continue;
2315                 LASSERT(al->tl_shared == 0);
2316                 found = 1;
2317                 /* do actual punch in osd_trans_stop() */
2318                 al->tl_start = start;
2319                 al->tl_end = end;
2320                 al->tl_mode = mode;
2321                 al->tl_punch = true;
2322                 break;
2323         }
2324
2325         RETURN(rc);
2326 }
2327
2328 static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
2329                          __u64 start, __u64 end, int mode, struct thandle *th)
2330 {
2331         int rc;
2332
2333         ENTRY;
2334
2335         if (mode & FALLOC_FL_PUNCH_HOLE) {
2336                 /* punch */
2337                 rc = osd_fallocate_punch(env, dt, start, end, mode, th);
2338         } else {
2339                 /* standard preallocate */
2340                 rc = osd_fallocate_preallocate(env, dt, start, end, mode, th);
2341         }
2342         RETURN(rc);
2343 }
2344
2345 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
2346                              __u64 start, __u64 end, struct thandle *th)
2347 {
2348         struct osd_thandle *oh;
2349         struct osd_object  *obj = osd_dt_obj(dt);
2350         struct inode       *inode;
2351         int                 rc;
2352         ENTRY;
2353
2354         LASSERT(th);
2355         oh = container_of(th, struct osd_thandle, ot_super);
2356
2357         /*
2358          * we don't reserve credits for the whole truncate: that is
2359          * not possible, as truncate may need to free too many blocks
2360          * and that won't fit into a single transaction. instead we
2361          * reserve credits to change i_size and to put the inode onto
2362          * the orphan list. if needed, truncate will extend or restart
2363          * the transaction
2364          */
2365         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
2366                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
2367
2368         inode = obj->oo_inode;
2369         LASSERT(inode);
2370
2371         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2372                                    i_projid_read(inode), 0, oh, obj,
2373                                    NULL, OSD_QID_BLK);
2374
2375         /* if object holds encrypted content, we need to make sure we truncate
2376          * on an encryption unit boundary, or subsequent reads will get
2377          * corrupted content
2378          */
2379         if (rc == 0) {
2380                 if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2381                     start & ~LUSTRE_ENCRYPTION_MASK)
2382                         start = (start & LUSTRE_ENCRYPTION_MASK) +
2383                                 LUSTRE_ENCRYPTION_UNIT_SIZE;
2384                 ll_truncate_pagecache(inode, start);
2385                 rc = osd_trunc_lock(obj, oh, false);
2386         }
2387
2388         RETURN(rc);
2389 }
2390
2391 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
2392                      __u64 start, __u64 end, struct thandle *th)
2393 {
2394         struct osd_object *obj = osd_dt_obj(dt);
2395         struct osd_device *osd = osd_obj2dev(obj);
2396         struct inode *inode = obj->oo_inode;
2397         struct osd_access_lock *al;
2398         struct osd_thandle *oh;
2399         int rc = 0, found = 0;
2400         bool grow = false;
2401         ENTRY;
2402
2403         LASSERT(dt_object_exists(dt));
2404         LASSERT(osd_invariant(obj));
2405         LASSERT(inode != NULL);
2406         dquot_initialize(inode);
2407
2408         LASSERT(th);
2409         oh = container_of(th, struct osd_thandle, ot_super);
2410         LASSERT(oh->ot_handle->h_transaction != NULL);
2411
2412         /* we used to skip truncate to the current size to
2413          * optimize truncates on OST. with DoM we can get an
2414          * attr_set setting a specific size (MDS_REINT) and
2415          * then a truncate RPC that would essentially be
2416          * skipped. this is bad, so disable this optimization
2417          * on MDS until clients stop sending MDS_REINT
2418          * (LU-11033) -bzzz
2419          */
2420         if (osd->od_is_ost && i_size_read(inode) == start)
2421                 RETURN(0);
2422
2423         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
2424
2425         spin_lock(&inode->i_lock);
2426         if (i_size_read(inode) < start)
2427                 grow = true;
2428         i_size_write(inode, start);
2429         spin_unlock(&inode->i_lock);
2430
2431         /* optimize grow case */
2432         if (grow) {
2433                 osd_execute_truncate(obj);
2434                 GOTO(out, rc);
2435         }
2436
2437         inode_lock(inode);
2438         /* add to the orphan list to ensure truncate completes
2439          * if this transaction succeeds. ldiskfs_truncate()
2440          * will take the inode off the list
2441          */
2442         rc = ldiskfs_orphan_add(oh->ot_handle, inode);
2443         inode_unlock(inode);
2444         if (rc != 0)
2445                 GOTO(out, rc);
2446
2447         list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2448                 if (obj != al->tl_obj)
2449                         continue;
2450                 LASSERT(al->tl_shared == 0);
2451                 found = 1;
2452                 /* do actual truncate in osd_trans_stop() */
2453                 al->tl_truncate = 1;
2454                 break;
2455         }
2456         LASSERT(found);
2457
2458 out:
2459         RETURN(rc);
2460 }
2461
2462 static int fiemap_check_ranges(struct inode *inode,
2463                                u64 start, u64 len, u64 *new_len)
2464 {
2465         loff_t maxbytes;
2466
2467         *new_len = len;
2468
2469         if (len == 0)
2470                 return -EINVAL;
2471
2472         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
2473                 maxbytes = inode->i_sb->s_maxbytes;
2474         else
2475                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
2476
2477         if (start > maxbytes)
2478                 return -EFBIG;
2479
2480         /*
2481          * Shrink request scope to what the fs can actually handle.
2482          */
2483         if (len > maxbytes || (maxbytes - len) < start)
2484                 *new_len = maxbytes - start;
2485
2486         return 0;
2487 }
2488
2489 /* So that the fiemap access checks can't overflow on 32 bit machines. */
2490 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
2491
2492 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
2493                           struct fiemap *fm)
2494 {
2495         struct fiemap_extent_info fieinfo = {0, };
2496         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2497         u64 len;
2498         int rc;
2499         DECLARE_MM_SEGMENT_T(saved_fs);
2500
2501         LASSERT(inode);
2502         if (inode->i_op->fiemap == NULL)
2503                 return -EOPNOTSUPP;
2504
2505         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
2506                 return -EINVAL;
2507
2508         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
2509         if (rc)
2510                 return rc;
2511
2512         fieinfo.fi_flags = fm->fm_flags;
2513         fieinfo.fi_extents_max = fm->fm_extent_count;
2514         fieinfo.fi_extents_start = fm->fm_extents;
2515
2516         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
2517                 filemap_write_and_wait(inode->i_mapping);
2518
2519         access_set_kernel(saved_fs, &fieinfo);
2520         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
2521         access_unset_kernel(saved_fs, &fieinfo);
2522         fm->fm_flags = fieinfo.fi_flags;
2523         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
2524
2525         return rc;
2526 }
2527
2528 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
2529                        __u64 start, __u64 end, enum lu_ladvise_type advice)
2530 {
2531         struct osd_object *obj = osd_dt_obj(dt);
2532         int rc = 0;
2533         ENTRY;
2534
2535         switch (advice) {
2536         case LU_LADVISE_DONTNEED:
2537                 if (end)
2538                         invalidate_mapping_pages(obj->oo_inode->i_mapping,
2539                                                  start >> PAGE_SHIFT,
2540                                                  (end - 1) >> PAGE_SHIFT);
2541                 break;
2542         default:
2543                 rc = -ENOTSUPP;
2544                 break;
2545         }
2546
2547         RETURN(rc);
2548 }
2549
2550 static loff_t osd_lseek(const struct lu_env *env, struct dt_object *dt,
2551                         loff_t offset, int whence)
2552 {
2553         struct osd_object *obj = osd_dt_obj(dt);
2554         struct osd_device *dev = osd_obj2dev(obj);
2555         struct inode *inode = obj->oo_inode;
2556         struct file *file;
2557         loff_t result;
2558
2559         ENTRY;
2560         LASSERT(dt_object_exists(dt));
2561         LASSERT(osd_invariant(obj));
2562         LASSERT(inode);
2563         LASSERT(offset >= 0);
2564
2565         file = alloc_file_pseudo(inode, dev->od_mnt, "/", O_NOATIME,
2566                                  inode->i_fop);
2567         if (IS_ERR(file))
2568                 RETURN(PTR_ERR(file));
2569
2570         file->f_mode |= FMODE_64BITHASH;
2571         result = file->f_op->llseek(file, offset, whence);
2572         ihold(inode);
2573         fput(file);
2574         /*
2575          * If 'offset' is beyond the end of the object file, treat it
2576          * not as an error but as a valid SEEK_HOLE case and return
2577          * 'offset'. LOV will decide if it is beyond the real EOF.
2578          */
2579         if (whence == SEEK_HOLE && result == -ENXIO)
2580                 result = offset;
2581
2582         CDEBUG(D_INFO, "seek %s from %lld: %lld\n", whence == SEEK_HOLE ?
2583                        "hole" : "data", offset, result);
2584         RETURN(result);
2585 }
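
/*
 * Behaviour sketch (illustrative only): for an object with 4KB of data
 * at offset 0 and nothing beyond,
 *
 *      osd_lseek(env, dt, 0, SEEK_HOLE)    returns 4096
 *      osd_lseek(env, dt, 8192, SEEK_DATA) returns -ENXIO
 *      osd_lseek(env, dt, 8192, SEEK_HOLE) returns 8192 (ENXIO remapped)
 *
 * so LOV can treat an offset inside a trailing hole uniformly without
 * knowing the component's real end of file.
 */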
2586
2587 /*
2588  * in some cases we may need declare methods for objects being created,
2589  * e.g., when we create a symlink
2590  */
2591 const struct dt_body_operations osd_body_ops_new = {
2592         .dbo_declare_write = osd_declare_write,
2593 };
2594
2595 const struct dt_body_operations osd_body_ops = {
2596         .dbo_read                       = osd_read,
2597         .dbo_declare_write              = osd_declare_write,
2598         .dbo_write                      = osd_write,
2599         .dbo_bufs_get                   = osd_bufs_get,
2600         .dbo_bufs_put                   = osd_bufs_put,
2601         .dbo_write_prep                 = osd_write_prep,
2602         .dbo_declare_write_commit       = osd_declare_write_commit,
2603         .dbo_write_commit               = osd_write_commit,
2604         .dbo_read_prep                  = osd_read_prep,
2605         .dbo_declare_punch              = osd_declare_punch,
2606         .dbo_punch                      = osd_punch,
2607         .dbo_fiemap_get                 = osd_fiemap_get,
2608         .dbo_ladvise                    = osd_ladvise,
2609         .dbo_declare_fallocate          = osd_declare_fallocate,
2610         .dbo_fallocate                  = osd_fallocate,
2611         .dbo_lseek                      = osd_lseek,
2612 };
2613
2614 /**
2615  * Get a truncate lock
2616  *
2617  * In order to take a multi-transaction truncate out of the main transaction
2618  * we let the caller grab a lock on the object passed. The lock can be
2619  * shared (for writes) or exclusive (for truncate). It is not allowed to mix
2620  * truncate and write in the same transaction handle (do not confuse this
2621  * with a big ldiskfs transaction containing lots of handles).
2622  * The lock must be taken at declaration time.
2623  *
2624  * \param obj           object to lock
2625  * \param oh            transaction
2626  * \param shared        shared or exclusive
2627  *
2628  * \retval 0            lock is granted
2629  * \retval -ENOMEM      no memory to allocate lock
2630  */
2631 int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
2632 {
2633         struct osd_access_lock *al, *tmp;
2634
2635         LASSERT(obj);
2636         LASSERT(oh);
2637
2638         list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
2639                 if (tmp->tl_obj != obj)
2640                         continue;
2641                 LASSERT(tmp->tl_shared == shared);
2642                 /* found same lock */
2643                 return 0;
2644         }
2645
2646         OBD_ALLOC_PTR(al);
2647         if (unlikely(al == NULL))
2648                 return -ENOMEM;
2649         al->tl_obj = obj;
2650         al->tl_truncate = false;
2651         if (shared)
2652                 down_read(&obj->oo_ext_idx_sem);
2653         else
2654                 down_write(&obj->oo_ext_idx_sem);
2655         al->tl_shared = shared;
2656         lu_object_get(&obj->oo_dt.do_lu);
2657
2658         list_add(&al->tl_list, &oh->ot_trunc_locks);
2659
2660         return 0;
2661 }
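
/*
 * Usage sketch (illustrative only): the declare phase takes the lock,
 * the execute phase only flags the work, and the actual truncate/punch
 * runs outside the transaction:
 *
 *      osd_declare_punch():    osd_trunc_lock(obj, oh, false);
 *      osd_punch():            al->tl_truncate = 1;
 *      transaction stop:       osd_process_truncates(env, list);
 *                              osd_trunc_unlock_all(env, list);
 *
 * which is why shared (write) and exclusive (truncate) users of the
 * same object must never share one transaction handle.
 */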
2662
2663 void osd_trunc_unlock_all(const struct lu_env *env, struct list_head *list)
2664 {
2665         struct osd_access_lock *al, *tmp;
2666
2667         list_for_each_entry_safe(al, tmp, list, tl_list) {
2668                 if (al->tl_shared)
2669                         up_read(&al->tl_obj->oo_ext_idx_sem);
2670                 else
2671                         up_write(&al->tl_obj->oo_ext_idx_sem);
2672                 osd_object_put(env, al->tl_obj);
2673                 list_del(&al->tl_list);
2674                 OBD_FREE_PTR(al);
2675         }
2676 }
2677
2678 /* For a partial-page punch, flush punch range to disk immediately */
2679 static void osd_partial_page_flush_punch(struct osd_device *d,
2680                                          struct inode *inode, loff_t start,
2681                                          loff_t end)
2682 {
2683         if (osd_use_page_cache(d)) {
2684                 filemap_fdatawrite_range(inode->i_mapping, start, end);
2685         } else {
2686                 /* Notice we use "wait" version to ensure I/O is complete */
2687                 filemap_write_and_wait_range(inode->i_mapping, start,
2688                                              end);
2689                 invalidate_mapping_pages(inode->i_mapping, start >> PAGE_SHIFT,
2690                                          end >> PAGE_SHIFT);
2691         }
2692 }
2693
2694 /*
2695  * For a partial-page truncate, flush the page to disk immediately to
2696  * avoid data corruption during direct disk write.  b=17397
2697  */
2698 static void osd_partial_page_flush(struct osd_device *d, struct inode *inode,
2699                                    loff_t offset)
2700 {
2701         if (!(offset & ~PAGE_MASK))
2702                 return;
2703
2704         if (osd_use_page_cache(d)) {
2705                 filemap_fdatawrite_range(inode->i_mapping, offset, offset + 1);
2706         } else {
2707                 /* Notice we use "wait" version to ensure I/O is complete */
2708                 filemap_write_and_wait_range(inode->i_mapping, offset,
2709                                              offset + 1);
2710                 invalidate_mapping_pages(inode->i_mapping, offset >> PAGE_SHIFT,
2711                                          offset >> PAGE_SHIFT);
2712         }
2713 }
2714
2715 void osd_execute_truncate(struct osd_object *obj)
2716 {
2717         struct osd_device *d = osd_obj2dev(obj);
2718         struct inode *inode = obj->oo_inode;
2719         __u64 size;
2720
2721         /* simulate a crash before (or in the middle of) a delayed truncate */
2722         if (CFS_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
2723                 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2724                 struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);
2725
2726                 mutex_lock(&sbi->s_orphan_lock);
2727                 list_del_init(&ei->i_orphan);
2728                 mutex_unlock(&sbi->s_orphan_lock);
2729                 return;
2730         }
2731
2732         size = i_size_read(inode);
2733         inode_lock(inode);
2734         /* if object holds encrypted content, we need to make sure we truncate
2735          * on an encryption unit boundary, or block content will get corrupted
2736          */
2737         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2738             size & ~LUSTRE_ENCRYPTION_MASK)
2739                 inode->i_size = (size & LUSTRE_ENCRYPTION_MASK) +
2740                         LUSTRE_ENCRYPTION_UNIT_SIZE;
2741         ldiskfs_truncate(inode);
2742         inode_unlock(inode);
2743         if (inode->i_size != size) {
2744                 spin_lock(&inode->i_lock);
2745                 i_size_write(inode, size);
2746                 LDISKFS_I(inode)->i_disksize = size;
2747                 spin_unlock(&inode->i_lock);
2748                 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
2749         }
2750         osd_partial_page_flush(d, inode, size);
2751 }
2752
2753 static int osd_execute_punch(const struct lu_env *env, struct osd_object *obj,
2754                              loff_t start, loff_t end, int mode)
2755 {
2756         struct osd_device *d = osd_obj2dev(obj);
2757         struct inode *inode = obj->oo_inode;
2758         struct file *file;
2759         int rc;
2760
2761         file = alloc_file_pseudo(inode, d->od_mnt, "/", O_NOATIME,
2762                                  inode->i_fop);
2763         if (IS_ERR(file))
2764                 RETURN(PTR_ERR(file));
2765
2766         file->f_mode |= FMODE_64BITHASH;
2767         rc = file->f_op->fallocate(file, mode, start, end - start);
2768         ihold(inode);
2769         fput(file);
2770         if (rc == 0)
2771                 osd_partial_page_flush_punch(d, inode, start, end - 1);
2772         return rc;
2773 }
2774
2775 int osd_process_truncates(const struct lu_env *env, struct list_head *list)
2776 {
2777         struct osd_access_lock *al;
2778         int rc = 0;
2779
2780         LASSERT(!journal_current_handle());
2781
2782         list_for_each_entry(al, list, tl_list) {
2783                 if (al->tl_shared)
2784                         continue;
2785                 if (al->tl_truncate)
2786                         osd_execute_truncate(al->tl_obj);
2787                 else if (al->tl_punch)
2788                         rc = osd_execute_punch(env, al->tl_obj, al->tl_start,
2789                                                al->tl_end, al->tl_mode);
2790         }
2791
2792         return rc;
2793 }