/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-ldiskfs/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 * OBD_FAIL_CHECK
 */
#include <obd_support.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>

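/*
 * (Re)initialize an osd_iobuf for a new bulk I/O of up to @pages pages,
 * growing the page and block arrays when the current buffers are too
 * small.  The arrays grow in powers of two, starting from 256 pages
 * (1MB of 4K blocks), via lu_buf_realloc().
 */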
static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = ktime_set(0, 0);
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)

static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;
}

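/*
 * Record brw stats (fragments per I/O and I/O time) once all requests of
 * the iobuf have completed; dr_elapsed is set by dio_complete_routine().
 */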
void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS + rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME + rw],
                                      ktime_to_ms(iobuf->dr_elapsed));
        }
}

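/*
 * Completion callback for every bio submitted by osd_do_bio().  It may
 * run in IRQ context, so no procfs/stats updates are allowed here.  On
 * read, successfully transferred pages are marked uptodate; the last
 * completing bio records the elapsed time and wakes up the waiter.
 */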
#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
# ifdef HAVE_BI_STATUS
        int error = bio->bi_status;
# else
        int error = bio->bi_error;
# endif
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
        struct osd_iobuf *iobuf = bio->bi_private;
        int iter;
        struct bio_vec *bvl;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL! This should never "
                       "happen. Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console. Please "
                       "report this to <https://jira.hpdd.intel.com/>, along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps). Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, "
#ifdef HAVE_BI_RW
                       "bi_rw: %lu, "
#else
                       "bi_opf: %u, "
#endif
                       "bi_vcnt: %d, bi_idx: %d, bi_size: %d, bi_end_io: %p, "
                       "bi_cnt: %d, bi_private: %p\n", bio->bi_next,
                        (unsigned long)bio->bi_flags,
#ifdef HAVE_BI_RW
                        bio->bi_rw,
#else
                        bio->bi_opf,
#endif
                        bio->bi_vcnt, bio_idx(bio),
                        bio_sectors(bio) << 9, bio->bi_end_io,
#ifdef HAVE_BI_CNT
                        atomic_read(&bio->bi_cnt),
#else
                        atomic_read(&bio->__bi_cnt),
#endif
                        bio->bi_private);
                return;
        }

        /* the check is outside of the loop for performance reasons -bzzz */
        if (!bio_data_dir(bio)) {
                bio_for_each_segment_all(bvl, bio, iter) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl_to_page(bvl));
                        LASSERT(PageLocked(bvl_to_page(bvl)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * Set dr_elapsed before dr_numreqs drops to 0, otherwise the
         * service thread could see dr_numreqs already at zero while
         * dr_elapsed is not set yet, losing the statistics for this I/O
         * and triggering an assertion in a subsequent OSD call.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                ktime_t now = ktime_get();

                iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
}

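/*
 * Account a bio about to be submitted: bump the per-device in-flight
 * counter and the RPC/disk-I/O-size histograms for the given direction.
 */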
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device    *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

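/* submit a bio for read (rw == 0) or write (rw == 1), depending on the
 * submit_bio() prototype provided by the kernel */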
static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
#else
        bio->bi_opf |= rw;
        submit_bio(bio);
#endif
}

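/* return 1 if @sector directly follows the last sector of @bio */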
static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}

/*
 * This function changes the data being written, so it must only be used
 * when testing the data integrity feature.
 */
static void bio_integrity_fault_inject(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;
        void *kaddr;
        char *addr;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                kaddr = kmap(page);
                addr = kaddr;
                *addr = ~(*addr);
                kunmap(page);
                break;
        }
}

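/*
 * Build and submit bios covering all pages of the iobuf.  Contiguous
 * blocks are merged into as few bios as possible; holes (block == 0) are
 * legal only for reads and are zero-filled instead of being read from
 * disk.  Writes are submitted asynchronously, see the comment at the end
 * of this function.
 */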
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page **pages = iobuf->dr_pages;
        int npages = iobuf->dr_npages;
        sector_t *blocks = iobuf->dr_blocks;
        int total_blocks = npages * blocks_per_page;
        int sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int blocksize = inode->i_sb->s_blocksize;
        struct bio *bio = NULL;
        struct page *page;
        unsigned int page_offset;
        sector_t sector;
        int nblocks;
        int block_idx;
        int page_idx;
        int i;
        int rc = 0;
        bool fault_inject;
        DECLARE_PLUG(plug);
        ENTRY;

        fault_inject = OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
        LASSERT(iobuf->dr_npages == npages);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = ktime_get();

        blk_start_plug(&plug);
        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q = bio_get_queue(bio);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       0, queue_max_hw_segments(q));
                                if (bio_integrity_enabled(bio)) {
                                        if (bio_integrity_prep(bio)) {
                                                bio_put(bio);
                                                rc = -EIO;
                                                goto out;
                                        }
                                        if (unlikely(fault_inject))
                                                bio_integrity_fault_inject(bio);
                                }

                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio_set_dev(bio, inode->i_sb->s_bdev);
                        bio_set_sector(bio, sector);
#ifdef HAVE_BI_RW
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
#else
                        bio->bi_opf = (iobuf->dr_rw == 0) ? READ : WRITE;
#endif
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                if (bio_integrity_enabled(bio)) {
                        if (bio_integrity_prep(bio)) {
                                bio_put(bio);
                                rc = -EIO;
                                goto out;
                        }
                        if (unlikely(fault_inject))
                                bio_integrity_fault_inject(bio);
                }

                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

out:
        blk_finish_plug(&plug);

        /* In order to achieve better IO throughput, we don't wait for write
         * completion here: we proceed with the transaction commit in
         * parallel and wait for IO completion once the transaction is
         * stopped, see osd_trans_stop() for more details -bzzz */
        if (iobuf->dr_rw == 0 || fault_inject) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}

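/* split the [offset, offset + len) byte range into per-page niobuf_local
 * entries, returning the number of entries in *nrpages */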
static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb)
{
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_SIZE - 1);
                int plen = PAGE_SIZE - poff;

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(0);
}

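/* find or create the page cache page backing @offset; a failed
 * allocation is counted in the LPROC_OSD_NO_PAGE stats counter */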
static struct page *osd_get_page(struct dt_object *dt, loff_t offset,
                                 gfp_t gfp_mask)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page *page;

        LASSERT(inode);

        page = find_or_create_page(inode->i_mapping, offset >> PAGE_SHIFT,
                                   gfp_mask);

        if (unlikely(page == NULL))
                lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);

        return page;
}

/*
 * The following locks are involved:
 * journal_start
 * i_mutex
 * page lock
 *
 * osd write path:
 *  - lock page(s)
 *  - journal_start
 *  - truncate_sem
 *
 * ext4 vmtruncate:
 *  - lock pages, unlock
 *  - journal_start
 *  - lock partial page
 *  - i_data_sem
 */

/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param lnb           array of pages undergoing IO
 * \param npages        number of pages in \a lnb
 *
 * \retval 0            always
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct pagevec pvec;
        int i;

#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
        pagevec_init(&pvec);
#else
        pagevec_init(&pvec, 0);
#endif

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_page == NULL)
                        continue;
                LASSERT(PageLocked(lnb[i].lnb_page));
                unlock_page(lnb[i].lnb_page);
                if (pagevec_add(&pvec, lnb[i].lnb_page) == 0)
                        pagevec_release(&pvec);
                dt_object_put(env, dt);
                lnb[i].lnb_page = NULL;
        }

        /* Release any partial pagevec */
        pagevec_release(&pvec);

        RETURN(0);
}

/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for the same
 * region, but a partial-page truncate can leave dirty pages in the cache
 * for ldiskfs.  It's possible that writeout on such a page is in progress
 * when we access it.  It's also possible that during this writeout we put
 * new (partial) data into the page, but won't be able to proceed in
 * filter_commitrw_write().  Therefore, just wait for writeout completion
 * as it should be rare enough.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param pos           byte offset of IO start
 * \param len           number of bytes of IO
 * \param lnb           array of extents undergoing IO
 * \param rw            read or write operation, and other flags
 * \param capa          capabilities
 *
 * \retval pages        (zero or more) loaded successfully
 * \retval -ENOMEM      on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        enum dt_bufs_type rw)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int npages, i, rc = 0;
        gfp_t gfp_mask;

        LASSERT(obj->oo_inode);

        osd_map_remote_to_local(pos, len, &npages, lnb);

        /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
        gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
                                             GFP_HIGHUSER;
        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(dt, lnb->lnb_file_offset,
                                             gfp_mask);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                wait_on_page_writeback(lnb->lnb_page);
                BUG_ON(PageWriteback(lnb->lnb_page));

                lu_object_get(&dt->do_lu);
        }

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}

#ifndef HAVE_LDISKFS_MAP_BLOCKS

#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif

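/* cursor used by ldiskfs_ext_new_extent_cb() to fill the caller-provided
 * array of block numbers while walking the extent tree */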
struct bpointers {
        sector_t *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

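/* pick a goal block for allocation: next to an existing extent if
 * possible, otherwise spread within the inode's block group by PID */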
static long ldiskfs_ext_find_goal(struct inode *inode,
                                  struct ldiskfs_ext_path *path,
                                  unsigned long block, int *aflags)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ldiskfs_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ldiskfs_ext_pblock(ex) +
                                (block - le32_to_cpu(ex->ee_block));

                /* it looks like the index is empty;
                 * try to find a start from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}

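/* allocate up to *count new blocks for logical block @block using mballoc,
 * hinted by the neighbouring extents found on @path */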
static unsigned long new_blocks(handle_t *handle, struct inode *inode,
                                struct ldiskfs_ext_path *path,
                                unsigned long block, unsigned long *count,
                                int *err)
{
        struct ldiskfs_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbouring allocated blocks */
        ar.lleft = block;
        *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = LDISKFS_MB_HINT_DATA;
        pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}

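/*
 * Callback for ldiskfs_ext_walk_space().  For a hole it either reports
 * unmapped blocks (create == 0) or allocates and inserts a new extent
 * under its own journal handle; in both cases the resulting block
 * numbers are copied into the bpointers cursor under the "map:" label.
 */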
static int ldiskfs_ext_new_extent_cb(struct inode *inode,
                                     struct ldiskfs_ext_path *path,
                                     struct ldiskfs_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                     struct ldiskfs_extent *ex,
#endif
                                     void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct ldiskfs_extent nex;
        unsigned long pblock = 0;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
        if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
#endif
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = LDISKFS_I(inode)->i_ext_generation;
        count = ldiskfs_ext_calc_credits_for_insert(inode, path);

        handle = osd_journal_start(inode, LDISKFS_HT_MISC,
                                   count + LDISKFS_ALLOC_NEEDED + 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        if (tgen != LDISKFS_I(inode)->i_ext_generation) {
                /* the tree has changed, so the path can be invalid now */
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback is not
         * protected by i_data_sem as a whole, so we patched it to store the
         * generation in the path and verify here that the tree has not
         * changed */
        down_write((&LDISKFS_I(inode)->i_data_sem));

        /* validate extent, make sure the extent tree has not changed */
        if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&LDISKFS_I(inode)->i_data_sem);
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        BUG_ON(count > cex->ec_len);

        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ldiskfs_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
        if (err) {
                /* free data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it every free() */
                ldiskfs_discard_preallocations(inode);
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ldiskfs_free_blocks(handle, inode, NULL,
                                    ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#else
                ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * By storing the length of the extent we actually inserted, we ask
         * ldiskfs_ext_walk_space() to continue scanning after that block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ldiskfs_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&LDISKFS_I(inode)->i_data_sem));
        ldiskfs_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        CERROR("current extent: %u/%u/%llu %d\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start,
                                cex->ec_type);
#else
                        CERROR("current extent: %u/%u/%llu\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start);
#endif
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (pblock != 0) {
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                unmap_underlying_metadata(inode->i_sb->s_bdev,
                                                          *(bp->blocks));
#else
                                clean_bdev_aliases(inode->i_sb->s_bdev,
                                                   *(bp->blocks), 1);
#endif
                        }
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

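/* map (or allocate, if @create) the blocks backing @clen pages starting
 * at page @index by walking the inode's extent tree */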
static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index,
                                   int clen, sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct bpointers bp;
        int err;

        if (index + clen >= inode->i_sb->s_maxbytes >> PAGE_SHIFT)
                return -EFBIG;

        bp.blocks = blocks;
        bp.start = index * blocks_per_page;
        bp.init_num = bp.num = clen * blocks_per_page;
        bp.create = create;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               bp.start, bp.start + bp.num - 1, (unsigned)inode->i_ino);

        err = ldiskfs_ext_walk_space(inode, bp.start, bp.num,
                                     ldiskfs_ext_new_extent_cb, &bp);
        ldiskfs_ext_invalidate_cache(inode);

        return err;
}

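/* block-mapped (non-extent) inodes: map each page separately with
 * ldiskfs_map_inode_page() */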
static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode,
                                          struct page **page, int pages,
                                          sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        pgoff_t bitmap_max_page_index;
        sector_t *b;
        int rc = 0, i;

        bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
                                PAGE_SHIFT;
        for (i = 0, b = blocks; i < pages; i++, page++) {
                if ((*page)->index + 1 >= bitmap_max_page_index) {
                        rc = -EFBIG;
                        break;
                }
                rc = ldiskfs_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %llu create %d: rc %d\n",
                               inode->i_ino,
                               (unsigned long long)*b, create, rc);
                        break;
                }
                b += blocks_per_page;
        }
        return rc;
}

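/* extent-based inodes: group contiguous pages into ranges and map each
 * range with osd_ldiskfs_map_nblocks() */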
static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode,
                                           struct page **page,
                                           int pages, sector_t *blocks,
                                           int create)
{
        int rc = 0, i = 0, clen = 0;
        struct page *fp = NULL;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are already sorted, so we just have to find runs of
         * contiguous pages and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += clen * (PAGE_SIZE >> inode->i_blkbits);
        }

        if (fp)
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);

cleanup:
        return rc;
}

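/* dispatch page mapping to the extent or block-map variant depending on
 * the inode flags */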
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int rc;

        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);

        return rc;
}
#else
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;
        handle_t *handle = NULL;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        if (create) {
                create = LDISKFS_GET_BLOCKS_CREATE;
                handle = ldiskfs_journal_current_handle();
                LASSERT(handle != NULL);
                rc = osd_attach_jinode(inode);
                if (rc)
                        return rc;
        }
        /* pages are already sorted, so we just have to find runs of
         * contiguous pages and process them properly */
        while (i < pages) {
                long blen, total = 0;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
cont_map:
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;
                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                } else {
                                        *(blocks + total) = map.m_pblk + c;
                                        /* unmap any possible underlying
                                         * metadata from the block device
                                         * mapping.  bug 6998. */
                                        if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                            create)
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                                unmap_underlying_metadata(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c);
#else
                                                clean_bdev_aliases(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c, 1);
#endif
                                }
                        }
                        rc = 0;
                }
                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        return rc;
}
#endif /* HAVE_LDISKFS_MAP_BLOCKS */

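/*
 * Prepare pages for a write: invalidate cached data where caching is
 * disabled, and pre-read (or zero) partial pages so that the unwritten
 * parts of a page keep their old content.
 */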
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        ktime_t start;
        ktime_t end;
        s64 timediff;
        ssize_t                 isize;
        __s64                   maxidx;
        int                     rc = 0;
        int                     i;
        int                     cache = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        start = ktime_get();
        for (i = 0; i < npages; i++) {

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);

                /*
                 * Till the transaction commits, the content of the page is
                 * undefined; we'll set it uptodate once the bulk transfer
                 * is done.  Otherwise subsequent reads could access
                 * unstable data.
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_SIZE)
                        continue;

                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}

struct osd_fextent {
        sector_t        start;
        sector_t        end;
        unsigned int    mapped:1;
};

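/*
 * Check whether the block at @offset is already allocated, using FIEMAP
 * and caching the last extent looked up in @cached_extent to avoid
 * repeated queries for adjacent pages.
 */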
static int osd_is_mapped(struct dt_object *dt, __u64 offset,
                         struct osd_fextent *cached_extent)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        sector_t block = offset >> inode->i_blkbits;
        sector_t start;
        struct fiemap_extent_info fei = { 0 };
        struct fiemap_extent fe = { 0 };
        mm_segment_t saved_fs;
        int rc;

        if (block >= cached_extent->start && block < cached_extent->end)
                return cached_extent->mapped;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
                return 0;

        fei.fi_extents_max = 1;
        fei.fi_extents_start = &fe;

        saved_fs = get_fs();
        set_fs(get_ds());
        rc = inode->i_op->fiemap(inode, &fei, offset,
                                 FIEMAP_MAX_OFFSET - offset);
        set_fs(saved_fs);
        if (rc != 0)
                return 0;

        start = fe.fe_logical >> inode->i_blkbits;

        if (start > block) {
                cached_extent->start = block;
                cached_extent->end = start;
                cached_extent->mapped = 0;
        } else {
                cached_extent->start = start;
                cached_extent->end = (fe.fe_logical + fe.fe_length) >>
                                      inode->i_blkbits;
                cached_extent->mapped = 1;
        }

        return cached_extent->mapped;
}

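/*
 * Declare the transaction credits and quota space needed to write
 * @npages pages: blocks for data and tree/index metadata, bitmap and
 * group descriptor updates, plus quota for newly allocated blocks.
 */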
static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        int                     extents = 1;
        int                     depth;
        int                     i;
        int                     newblocks;
        int                     rc = 0;
        int                     flags = 0;
        int                     credits = 0;
        long long               quota_space = 0;
        struct osd_fextent      extent = { 0 };
        enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        newblocks = npages;

        /* calculate number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                if (i && lnb[i].lnb_file_offset !=
                    lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
                        extents++;

                if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
                        lnb[i].lnb_flags |= OBD_BRW_MAPPED;
                else
                        quota_space += PAGE_SIZE;

                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the check for
                 * whether a page is from cache can be simplified to:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on a per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        declare_flags |= OSD_QID_FORCE;
        }

        /*
         * each extent can go into a new leaf causing a split
         * 5 is max tree depth: inode + 4 index blocks
         * with blockmaps, depth is 3 at most
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * many concurrent threads may grow the tree by the time
                 * our transaction starts, so consider 2 as the min depth
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * extents;
        }

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);

        /* each new block can go in a different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                credits += newblocks;

        /* we can't dirty more gd blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                credits += newblocks;

        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* make sure the over quota flags were not set */
        lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), quota_space, oh,
                                   osd_dt_obj(dt), &flags, declare_flags);

        /* we need only to store the overquota flags in the first lnb for
         * now; once we support multi-object BRW, this code needs to be
         * revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
        if (flags & QUOTA_FL_OVER_PRJQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;

        if (rc == 0)
                rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);

        RETURN(rc);
}

/* write pages prepared by osd_write_prep() into the mapped blocks and
 * update the file size within the running transaction */
1318 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
1319                             struct niobuf_local *lnb, int npages,
1320                             struct thandle *thandle)
1321 {
1322         struct osd_thread_info *oti = osd_oti_get(env);
1323         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1324         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1325         struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
1326         loff_t isize;
1327         int rc = 0, i;
1328
1329         LASSERT(inode);
1330
1331         rc = osd_init_iobuf(osd, iobuf, 1, npages);
1332         if (unlikely(rc != 0))
1333                 RETURN(rc);
1334
1335         isize = i_size_read(inode);
1336         ll_vfs_dq_init(inode);
1337
1338         for (i = 0; i < npages; i++) {
1339                 if (lnb[i].lnb_rc == -ENOSPC &&
1340                     (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
1341                         /* Allow the write to proceed if overwriting an
1342                          * existing block */
1343                         lnb[i].lnb_rc = 0;
1344                 }
1345
1346                 if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
1347                         CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
1348                                lnb[i].lnb_rc);
1349                         LASSERT(lnb[i].lnb_page);
1350                         generic_error_remove_page(inode->i_mapping,
1351                                                   lnb[i].lnb_page);
1352                         continue;
1353                 }
1354
1355                 LASSERT(PageLocked(lnb[i].lnb_page));
1356                 LASSERT(!PageWriteback(lnb[i].lnb_page));
1357
1358                 if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
1359                         isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;
1360
1361                 /*
1362                  * Since write and truncate are serialized by oo_sem, even
1363                  * partial-page truncate should not leave dirty pages in the
1364                  * page cache.
1365                  */
1366                 LASSERT(!PageDirty(lnb[i].lnb_page));
1367
1368                 SetPageUptodate(lnb[i].lnb_page);
1369
1370                 osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
1371         }
1372
1373         osd_trans_exec_op(env, thandle, OSD_OT_WRITE);
1374
1375         if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
1376                 rc = -ENOSPC;
1377         } else if (iobuf->dr_npages > 0) {
1378                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1379                                                  iobuf->dr_npages,
1380                                                  iobuf->dr_blocks, 1);
1381         } else {
1382                 /* no pages to write, no transno is needed */
1383                 thandle->th_local = 1;
1384         }
1385
1386         if (likely(rc == 0)) {
1387                 spin_lock(&inode->i_lock);
1388                 if (isize > i_size_read(inode)) {
1389                         i_size_write(inode, isize);
1390                         LDISKFS_I(inode)->i_disksize = isize;
1391                         spin_unlock(&inode->i_lock);
1392                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1393                 } else {
1394                         spin_unlock(&inode->i_lock);
1395                 }
1396
1397                 rc = osd_do_bio(osd, inode, iobuf);
1398                 /* we don't do stats here as in the read path because
1399                  * write is async: we'll do this in osd_bufs_put() */
1400         } else {
1401                 osd_fini_iobuf(osd, iobuf);
1402         }
1403
1404         osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
1405
1406         if (unlikely(rc != 0)) {
1407                 /* if write fails, we should drop pages from the cache */
1408                 for (i = 0; i < npages; i++) {
1409                         if (lnb[i].lnb_page == NULL)
1410                                 continue;
1411                         LASSERT(PageLocked(lnb[i].lnb_page));
1412                         generic_error_remove_page(inode->i_mapping,
1413                                                   lnb[i].lnb_page);
1414                 }
1415         }
1416
1417         RETURN(rc);
1418 }
1419
1420 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1421                          struct niobuf_local *lnb, int npages)
1422 {
1423         struct osd_thread_info *oti = osd_oti_get(env);
1424         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1425         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1426         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1427         int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
1428         ktime_t start, end;
1429         s64 timediff;
1430         loff_t isize;
1431
1432         LASSERT(inode);
1433
1434         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1435         if (unlikely(rc != 0))
1436                 RETURN(rc);
1437
1438         isize = i_size_read(inode);
1439
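             /* decide whether to keep pages in the page cache: cache only if
              * the read cache is enabled and the file does not exceed
              * od_readcache_max_filesize; otherwise drop each page after use */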
1440         if (osd->od_read_cache)
1441                 cache = 1;
1442         if (isize > osd->od_readcache_max_filesize)
1443                 cache = 0;
1444
1445         start = ktime_get();
1446         for (i = 0; i < npages; i++) {
1448                 if (isize <= lnb[i].lnb_file_offset)
1449                         /* If there's no more data, abort early.
1450                          * lnb->lnb_rc == 0, so it's easy to detect later. */
1451                         break;
1452
1453                 if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len)
1454                         lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
1455                 else
1456                         lnb[i].lnb_rc = lnb[i].lnb_len;
1457
1458                 /* Bypass disk read if fail_loc is set properly */
1459                 if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
1460                         SetPageUptodate(lnb[i].lnb_page);
1461
1462                 if (PageUptodate(lnb[i].lnb_page)) {
1463                         cache_hits++;
1464                 } else {
1465                         cache_misses++;
1466                         osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
1467                 }
1468
1469                 if (cache == 0)
1470                         generic_error_remove_page(inode->i_mapping,
1471                                                   lnb[i].lnb_page);
1472         }
1473         end = ktime_get();
1474         timediff = ktime_us_delta(end, start);
1475         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1476
1477         if (cache_hits != 0)
1478                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1479                                     cache_hits);
1480         if (cache_misses != 0)
1481                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1482                                     cache_misses);
1483         if (cache_hits + cache_misses != 0)
1484                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1485                                     cache_hits + cache_misses);
1486
1487         if (iobuf->dr_npages) {
1488                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1489                                                  iobuf->dr_npages,
1490                                                  iobuf->dr_blocks, 0);
1491                 rc = osd_do_bio(osd, inode, iobuf);
1492
1493                 /* IO stats will be done in osd_bufs_put() */
1494         }
1495
1496         RETURN(rc);
1497 }
1498
1499 /*
1500  * XXX: Another layering violation for now.
1501  *
1502  * We don't want to use ->f_op->read/write methods, because the generic file write path
1503  *
1504  *         - serializes on ->i_sem, and
1505  *
1506  *         - does a lot of extra work like balance_dirty_pages(),
1507  *
1508  * which doesn't work for globally shared files like /last_rcvd.
1509  */
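     /*
      * Fast symlinks store the target directly in the inode body (i_data),
      * so reading one is a plain memcpy with no block I/O; callers are
      * expected to pass a buflen that fits within the inode body.
      */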
1510 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1511 {
1512         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1513
1514         memcpy(buffer, (char *)ei->i_data, buflen);
1515
1516         return buflen;
1517 }
1518
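     /*
      * Read up to @size bytes at @*offs from the inode body, block by
      * block via buffer heads and bypassing the page cache; the request
      * is clamped at i_size and unmapped (hole) blocks read back as
      * zeroes.  Returns the number of bytes read and advances @offs.
      */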
1519 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1520 {
1521         struct buffer_head *bh;
1522         unsigned long block;
1523         int osize;
1524         int blocksize;
1525         int csize;
1526         int boffs;
1527
1528         /* prevent reading after eof */
1529         spin_lock(&inode->i_lock);
1530         if (i_size_read(inode) < *offs + size) {
1531                 loff_t diff = i_size_read(inode) - *offs;
1532                 spin_unlock(&inode->i_lock);
1533                 if (diff < 0) {
1534                         CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
1535                                i_size_read(inode), *offs);
1536                         return -EBADR;
1537                 } else if (diff == 0) {
1538                         return 0;
1539                 } else {
1540                         size = diff;
1541                 }
1542         } else {
1543                 spin_unlock(&inode->i_lock);
1544         }
1545
1546         blocksize = 1 << inode->i_blkbits;
1547         osize = size;
1548         while (size > 0) {
1549                 block = *offs >> inode->i_blkbits;
1550                 boffs = *offs & (blocksize - 1);
1551                 csize = min(blocksize - boffs, size);
1552                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1553                 if (IS_ERR(bh)) {
1554                         CERROR("%s: can't read %u@%llu on ino %lu: "
1555                                "rc = %ld\n", osd_ino2name(inode),
1556                                csize, *offs, inode->i_ino,
1557                                PTR_ERR(bh));
1558                         return PTR_ERR(bh);
1559                 }
1560
1561                 if (bh != NULL) {
1562                         memcpy(buf, bh->b_data + boffs, csize);
1563                         brelse(bh);
1564                 } else {
1565                         memset(buf, 0, csize);
1566                 }
1567
1568                 *offs += csize;
1569                 buf += csize;
1570                 size -= csize;
1571         }
1572         return osize;
1573 }
1574
1575 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1576                         struct lu_buf *buf, loff_t *pos)
1577 {
1578         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1579         int           rc;
1580
1581         /* Read small symlink from inode body as we need to maintain correct
1582          * on-disk symlinks for ldiskfs.
1583          */
1584         if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1585             (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1586                 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1587         else
1588                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1589
1590         return rc;
1591 }
1592
1593 static inline int osd_extents_enabled(struct super_block *sb,
1594                                       struct inode *inode)
1595 {
1596         if (inode != NULL) {
1597                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1598                         return 1;
1599         } else if (ldiskfs_has_feature_extents(sb)) {
1600                 return 1;
1601         }
1602         return 0;
1603 }
1604
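     /*
      * Estimate journal credits for writing @blocks blocks at @pos into
      * a block-mapped (non-extent) file.  A worked example, assuming 4KB
      * blocks: overwriting 2 blocks within the first LDISKFS_NDIR_BLOCKS
      * of an existing file whose direct slots are already allocated
      * costs just 2 credits, while a write at an unknown offset
      * (pos == -1) falls back to the default (blocks + 2) * 3.
      */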
1605 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1606                            const loff_t size, const loff_t pos,
1607                            const int blocks)
1608 {
1609         int credits, bits, bs, i;
1610
1611         bits = sb->s_blocksize_bits;
1612         bs = 1 << bits;
1613
1614         /* legacy blockmap: 3 levels * 3 (bitmap, gd, itself).
1615          * we do not expect blockmaps on large files,
1616          * so let's shrink it to 2 levels (4GB files) */
1617
1618         /* this is default reservation: 2 levels */
1619         credits = (blocks + 2) * 3;
1620
1621         /* actual offset is unknown, hard to optimize */
1622         if (pos == -1)
1623                 return credits;
1624
1625         /* now check for few specific cases to optimize */
1626         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1627                 /* no indirects */
1628                 credits = blocks;
1629                 /* allocate if not allocated */
1630                 if (inode == NULL) {
1631                         credits += blocks * 2;
1632                         return credits;
1633                 }
1634                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1635                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1636                         if (LDISKFS_I(inode)->i_data[i] == 0)
1637                                 credits += 2;
1638                 }
1639         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1640                 /* single indirect */
1641                 credits = blocks * 3;
1642                 if (inode == NULL ||
1643                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1644                         credits += 3;
1645                 else
1646                         /* The indirect block may be modified. */
1647                         credits += 1;
1648         }
1649
1650         return credits;
1651 }
1652
1653 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1654                                  const struct lu_buf *buf, loff_t _pos,
1655                                  struct thandle *handle)
1656 {
1657         struct osd_object  *obj  = osd_dt_obj(dt);
1658         struct inode       *inode = obj->oo_inode;
1659         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1660         struct osd_thandle *oh;
1661         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1662         int                 bits, bs;
1663         int                 depth, size;
1664         loff_t              pos;
1665         ENTRY;
1666
1667         LASSERT(buf != NULL);
1668         LASSERT(handle != NULL);
1669
1670         oh = container_of0(handle, struct osd_thandle, ot_super);
1671         LASSERT(oh->ot_handle == NULL);
1672
1673         size = buf->lb_len;
1674         bits = sb->s_blocksize_bits;
1675         bs = 1 << bits;
1676
1677         if (_pos == -1) {
1678                 /* if this is an append, then we
1679                  * should expect a cross-block record */
1680                 pos = 0;
1681         } else {
1682                 pos = _pos;
1683         }
1684
1685         /* blocks to modify */
1686         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1687         LASSERT(blocks > 0);
1688
1689         if (inode != NULL && _pos != -1) {
1690                 /* object size in blocks */
1691                 est = (i_size_read(inode) + bs - 1) >> bits;
1692                 allocated = inode->i_blocks >> (bits - 9);
1693                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1694                         /* looks like an overwrite, no need to modify tree */
1695                         credits = blocks;
1696                         /* no need to modify i_size */
1697                         goto out;
1698                 }
1699         }
1700
1701         if (osd_extents_enabled(sb, inode)) {
1702                 /*
1703                  * many concurrent threads may grow the tree by the time
1704                  * our transaction starts, so consider 2 a minimum depth.
1705                  * at every level we may need to allocate a new block and
1706                  * move some entries from the old one: 3 blocks to
1707                  * allocate (bitmap, gd, itself) + the old block = 4 per
1708                  * level.
1709                  */
1710                 depth = inode != NULL ? ext_depth(inode) : 0;
1711                 depth = max(depth, 1) + 1;
1712                 credits = depth;
1713                 /* if not an append, then a split may need to modify
1714                  * existing blocks, moving entries into the new ones */
1715                 if (_pos != -1)
1716                         credits += depth;
1717                 /* blocks to store data: bitmap,gd,itself */
1718                 credits += blocks * 3;
1719         } else {
1720                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1721         }
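             /* e.g., with extents enabled, an existing tree of depth 1 and
              * a non-append 2-block write: depth = max(1, 1) + 1 = 2, so
              * credits = 2 + 2 + 2 * 3 = 10, plus one more below for the
              * inode update */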
1722         /* if the inode is created in this transaction, its update is
1723          * already counted by the creation method; else add one credit */
1724         if (inode != NULL)
1725                 credits++;
1726
1727 out:
1728
1729         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1730
1731         /* dt_declare_write() is usually called for system objects, such
1732          * as llog or last_rcvd files. We needn't enforce quota on those
1733          * objects, so always set the lqi_space to 0. */
1734         if (inode != NULL)
1735                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1736                                            i_gid_read(inode),
1737                                            i_projid_read(inode), 0,
1738                                            oh, obj, NULL, OSD_QID_BLK);
1739
1740         if (rc == 0)
1741                 rc = osd_trunc_lock(obj, oh, true);
1742
1743         RETURN(rc);
1744 }
1745
1746 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1747 {
1748         /* LU-2634: clear the extent format for fast symlink */
1749         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1750
1751         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1752         spin_lock(&inode->i_lock);
1753         LDISKFS_I(inode)->i_disksize = buflen;
1754         i_size_write(inode, buflen);
1755         spin_unlock(&inode->i_lock);
1756         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1757
1758         return 0;
1759 }
1760
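     /*
      * Write @bufsize bytes at @*offs under the running journal handle:
      * each affected block is read or allocated with __ldiskfs_bread(),
      * modified under journal write access and dirtied as metadata, so
      * the update commits atomically with the caller's transaction; if
      * @write_NUL is set, a NUL terminator is appended (long symlinks).
      */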
1761 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
1762                              int write_NUL, loff_t *offs, handle_t *handle)
1763 {
1764         struct buffer_head *bh        = NULL;
1765         loff_t              offset    = *offs;
1766         loff_t              new_size  = i_size_read(inode);
1767         unsigned long       block;
1768         int                 blocksize = 1 << inode->i_blkbits;
1769         int                 err = 0;
1770         int                 size;
1771         int                 boffs;
1772         int                 dirty_inode = 0;
1773
1774         if (write_NUL) {
1775                 /*
1776                  * a long symlink write does not count the NUL terminator in
1777                  * bufsize, so we write it out explicitly; the inode's file
1778                  * size does not count the NUL terminator either.
1779                  */
1780                 ((char *)buf)[bufsize] = '\0';
1781                 ++bufsize;
1782         }
1783
1784         while (bufsize > 0) {
1785                 int credits = handle->h_buffer_credits;
1786
1787                 if (bh)
1788                         brelse(bh);
1789
1790                 block = offset >> inode->i_blkbits;
1791                 boffs = offset & (blocksize - 1);
1792                 size = min(blocksize - boffs, bufsize);
1793                 bh = __ldiskfs_bread(handle, inode, block, 1);
1794                 if (IS_ERR_OR_NULL(bh)) {
1795                         if (bh == NULL) {
1796                                 err = -EIO;
1797                         } else {
1798                                 err = PTR_ERR(bh);
1799                                 bh = NULL;
1800                         }
1801
1802                         CERROR("%s: error reading offset %llu (block %lu, "
1803                                "size %d, offs %llu), credits %d/%d: rc = %d\n",
1804                                inode->i_sb->s_id, offset, block, bufsize, *offs,
1805                                credits, handle->h_buffer_credits, err);
1806                         break;
1807                 }
1808
1809                 err = ldiskfs_journal_get_write_access(handle, bh);
1810                 if (err) {
1811                         CERROR("journal_get_write_access() returned error %d\n",
1812                                err);
1813                         break;
1814                 }
1815                 LASSERTF(boffs + size <= bh->b_size,
1816                          "boffs %d size %d bh->b_size %lu\n",
1817                          boffs, size, (unsigned long)bh->b_size);
1818                 memcpy(bh->b_data + boffs, buf, size);
1819                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
1820                 if (err)
1821                         break;
1822
1823                 if (offset + size > new_size)
1824                         new_size = offset + size;
1825                 offset += size;
1826                 bufsize -= size;
1827                 buf += size;
1828         }
1829         if (bh)
1830                 brelse(bh);
1831
1832         if (write_NUL)
1833                 --new_size;
1834         /* correct in-core and on-disk sizes */
1835         if (new_size > i_size_read(inode)) {
1836                 spin_lock(&inode->i_lock);
1837                 if (new_size > i_size_read(inode))
1838                         i_size_write(inode, new_size);
1839                 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
1840                         LDISKFS_I(inode)->i_disksize = i_size_read(inode);
1841                         dirty_inode = 1;
1842                 }
1843                 spin_unlock(&inode->i_lock);
1844                 if (dirty_inode)
1845                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1846         }
1847
1848         if (err == 0)
1849                 *offs = offset;
1850         return err;
1851 }
1852
1853 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1854                          const struct lu_buf *buf, loff_t *pos,
1855                          struct thandle *handle, int ignore_quota)
1856 {
1857         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1858         struct osd_thandle      *oh;
1859         ssize_t                 result;
1860         int                     is_link;
1861
1862         LASSERT(dt_object_exists(dt));
1863
1864         LASSERT(handle != NULL);
1865         LASSERT(inode != NULL);
1866         ll_vfs_dq_init(inode);
1867
1868         /* XXX: don't check: one declared chunk can be used many times */
1869         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
1870
1871         oh = container_of(handle, struct osd_thandle, ot_super);
1872         LASSERT(oh->ot_handle->h_transaction != NULL);
1873         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
1874
1875         /* Write small symlink to inode body as we need to maintain correct
1876          * on-disk symlinks for ldiskfs.
1877          * Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
1878          * does not include it.
1879          */
1880         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1881         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1882                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1883         else
1884                 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
1885                                                   buf->lb_len, is_link, pos,
1886                                                   oh->ot_handle);
1887         if (result == 0)
1888                 result = buf->lb_len;
1889
1890         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
1891
1892         return result;
1893 }
1894
1895 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1896                              __u64 start, __u64 end, struct thandle *th)
1897 {
1898         struct osd_thandle *oh;
1899         struct inode       *inode;
1900         int                 rc;
1901         ENTRY;
1902
1903         LASSERT(th);
1904         oh = container_of(th, struct osd_thandle, ot_super);
1905
1906         /*
1907          * we don't need to reserve credits for the whole truncate:
1908          * that is not possible, as truncate may need to free too many
1909          * blocks and that won't fit in a single transaction. instead
1910          * we reserve credits to change i_size and put the inode onto
1911          * the orphan list. if needed, truncate will extend or restart
1912          * the transaction
1913          */
1914         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1915                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1916
1917         inode = osd_dt_obj(dt)->oo_inode;
1918         LASSERT(inode);
1919
1920         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1921                                    i_projid_read(inode), 0, oh, osd_dt_obj(dt),
1922                                    NULL, OSD_QID_BLK);
1923
1924         if (rc == 0)
1925                 rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
1926
1927         RETURN(rc);
1928 }
1929
1930 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1931                      __u64 start, __u64 end, struct thandle *th)
1932 {
1933         struct osd_object *obj = osd_dt_obj(dt);
1934         struct osd_device *osd = osd_obj2dev(obj);
1935         struct inode *inode = obj->oo_inode;
1936         struct osd_access_lock *al;
1937         struct osd_thandle *oh;
1938         int rc = 0, found = 0;
1939         bool grow = false;
1940         ENTRY;
1941
1942         LASSERT(end == OBD_OBJECT_EOF);
1943         LASSERT(dt_object_exists(dt));
1944         LASSERT(osd_invariant(obj));
1945         LASSERT(inode != NULL);
1946         ll_vfs_dq_init(inode);
1947
1948         LASSERT(th);
1949         oh = container_of(th, struct osd_thandle, ot_super);
1950         LASSERT(oh->ot_handle->h_transaction != NULL);
1951
1952         /* we used to skip truncate to the current size to
1953          * optimize truncates on the OST. with DoM we can
1954          * get an attr_set to set a specific size (MDS_REINT)
1955          * and then a truncate RPC which would essentially
1956          * be skipped. this is bad, so disable this
1957          * optimization on the MDS until clients stop
1958          * sending MDS_REINT (LU-11033) -bzzz */
1959         if (osd->od_is_ost && i_size_read(inode) == start)
1960                 RETURN(0);
1961
1962         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
1963
1964         spin_lock(&inode->i_lock);
1965         if (i_size_read(inode) < start)
1966                 grow = true;
1967         i_size_write(inode, start);
1968         spin_unlock(&inode->i_lock);
1969         ll_truncate_pagecache(inode, start);
1970
1971         /* optimize grow case */
1972         if (grow) {
1973                 osd_execute_truncate(obj);
1974                 GOTO(out, rc);
1975         }
1976
1977         /* add to orphan list to ensure truncate completion
1978          * if this transaction succeeds. ldiskfs_truncate()
1979          * will take the inode out of the list */
1980         rc = ldiskfs_orphan_add(oh->ot_handle, inode);
1981         if (rc != 0)
1982                 GOTO(out, rc);
1983
1984         list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
1985                 if (obj != al->tl_obj)
1986                         continue;
1987                 LASSERT(al->tl_shared == 0);
1988                 found = 1;
1989                 /* do actual truncate in osd_trans_stop() */
1990                 al->tl_truncate = 1;
1991                 break;
1992         }
1993         LASSERT(found);
1994
1995 out:
1996         RETURN(rc);
1997 }
1998
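     /*
      * Validate a fiemap request and clamp its length to what the
      * mapping scheme can address: s_maxbytes for extent-mapped inodes,
      * s_bitmap_maxbytes for block-mapped ones.
      */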
1999 static int fiemap_check_ranges(struct inode *inode,
2000                                u64 start, u64 len, u64 *new_len)
2001 {
2002         loff_t maxbytes;
2003
2004         *new_len = len;
2005
2006         if (len == 0)
2007                 return -EINVAL;
2008
2009         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
2010                 maxbytes = inode->i_sb->s_maxbytes;
2011         else
2012                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
2013
2014         if (start > maxbytes)
2015                 return -EFBIG;
2016
2017         /*
2018          * Shrink request scope to what the fs can actually handle.
2019          */
2020         if (len > maxbytes || (maxbytes - len) < start)
2021                 *new_len = maxbytes - start;
2022
2023         return 0;
2024 }
2025
2026 /* So that the fiemap access checks can't overflow on 32 bit machines. */
2027 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
2028
2029 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
2030                           struct fiemap *fm)
2031 {
2032         struct fiemap_extent_info fieinfo = {0, };
2033         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2034         u64 len;
2035         int rc;
2036
2038         LASSERT(inode);
2039         if (inode->i_op->fiemap == NULL)
2040                 return -EOPNOTSUPP;
2041
2042         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
2043                 return -EINVAL;
2044
2045         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
2046         if (rc)
2047                 return rc;
2048
2049         fieinfo.fi_flags = fm->fm_flags;
2050         fieinfo.fi_extents_max = fm->fm_extent_count;
2051         fieinfo.fi_extents_start = fm->fm_extents;
2052
2053         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
2054                 filemap_write_and_wait(inode->i_mapping);
2055
2056         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
2057         fm->fm_flags = fieinfo.fi_flags;
2058         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
2059
2060         return rc;
2061 }
2062
2063 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
2064                        __u64 start, __u64 end, enum lu_ladvise_type advice)
2065 {
2066         int              rc = 0;
2067         struct inode    *inode = osd_dt_obj(dt)->oo_inode;
2068         ENTRY;
2069
2070         switch (advice) {
2071         case LU_LADVISE_DONTNEED:
2072                 if (end == 0)
2073                         break;
2074                 invalidate_mapping_pages(inode->i_mapping,
2075                                          start >> PAGE_SHIFT,
2076                                          (end - 1) >> PAGE_SHIFT);
2077                 break;
2078         default:
2079                 rc = -ENOTSUPP;
2080                 break;
2081         }
2082
2083         RETURN(rc);
2084 }
2085
2086 /*
2087  * in some cases we may need the declare methods for objects being created,
2088  * e.g., when we create a symlink
2089  */
2090 const struct dt_body_operations osd_body_ops_new = {
2091         .dbo_declare_write = osd_declare_write,
2092 };
2093
2094 const struct dt_body_operations osd_body_ops = {
2095         .dbo_read                       = osd_read,
2096         .dbo_declare_write              = osd_declare_write,
2097         .dbo_write                      = osd_write,
2098         .dbo_bufs_get                   = osd_bufs_get,
2099         .dbo_bufs_put                   = osd_bufs_put,
2100         .dbo_write_prep                 = osd_write_prep,
2101         .dbo_declare_write_commit       = osd_declare_write_commit,
2102         .dbo_write_commit               = osd_write_commit,
2103         .dbo_read_prep                  = osd_read_prep,
2104         .dbo_declare_punch              = osd_declare_punch,
2105         .dbo_punch                      = osd_punch,
2106         .dbo_fiemap_get                 = osd_fiemap_get,
2107         .dbo_ladvise                    = osd_ladvise,
2108 };
2109
2110 /**
2111  * Get a truncate lock
2112  *
2113  * In order to take a multi-transaction truncate out of the main transaction
2114  * we let the caller grab a lock on the object passed. The lock can be shared
2115  * (for writes) or exclusive (for truncate). It's not allowed to mix truncate
2116  * and write in the same transaction handle (do not confuse with a big ldiskfs
2117  * transaction containing lots of handles).
2118  * The lock must be taken at declaration time.
2119  *
2120  * \param obj           object to lock
2121  * \param oh            transaction handle
2122  * \param shared        shared (write) or exclusive (truncate) lock
2123  *
2124  * \retval 0            lock is granted
2125  * \retval -ENOMEM      no memory to allocate the lock
2126  */
2127 int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
2128 {
2129         struct osd_access_lock *al, *tmp;
2130
2131         LASSERT(obj);
2132         LASSERT(oh);
2133
2134         list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
2135                 if (tmp->tl_obj != obj)
2136                         continue;
2137                 LASSERT(tmp->tl_shared == shared);
2138                 /* found same lock */
2139                 return 0;
2140         }
2141
2142         OBD_ALLOC_PTR(al);
2143         if (unlikely(al == NULL))
2144                 return -ENOMEM;
2145         al->tl_obj = obj;
2146         al->tl_truncate = false;
2147         if (shared)
2148                 down_read(&obj->oo_ext_idx_sem);
2149         else
2150                 down_write(&obj->oo_ext_idx_sem);
2151         al->tl_shared = shared;
2152
2153         list_add(&al->tl_list, &oh->ot_trunc_locks);
2154
2155         return 0;
2156 }
2157
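     /* release and free every truncate lock collected on a transaction's
      * ot_trunc_locks list; the counterpart of osd_trunc_lock() */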
2158 void osd_trunc_unlock_all(struct list_head *list)
2159 {
2160         struct osd_access_lock *al, *tmp;
2161         list_for_each_entry_safe(al, tmp, list, tl_list) {
2162                 if (al->tl_shared)
2163                         up_read(&al->tl_obj->oo_ext_idx_sem);
2164                 else
2165                         up_write(&al->tl_obj->oo_ext_idx_sem);
2166                 list_del(&al->tl_list);
2167                 OBD_FREE_PTR(al);
2168         }
2169 }
2170
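     /* perform the actual truncate outside of the declaring transaction:
      * called directly from osd_punch() for the size-grow case, and from
      * osd_process_truncates() once the transaction has stopped */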
2171 void osd_execute_truncate(struct osd_object *obj)
2172 {
2173         struct inode *inode = obj->oo_inode;
2174         __u64 size;
2175
2176         /* simulate a crash before (or in the middle of) a delayed truncate */
2177         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
2178                 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2179                 struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);
2180
2181                 mutex_lock(&sbi->s_orphan_lock);
2182                 list_del_init(&ei->i_orphan);
2183                 mutex_unlock(&sbi->s_orphan_lock);
2184                 return;
2185         }
2186
2187 #ifdef HAVE_INODEOPS_TRUNCATE
2188         if (inode->i_op->truncate)
2189                 inode->i_op->truncate(inode);
2190         else
2191 #endif
2192                 ldiskfs_truncate(inode);
2193
2194         /*
2195          * For a partial-page truncate, flush the page to disk immediately to
2196          * avoid data corruption during direct disk write.  b=17397
2197          */
2198         size = i_size_read(inode);
2199         if ((size & ~PAGE_MASK) != 0)
2200                 filemap_fdatawrite_range(inode->i_mapping, size, size + 1);
2201 }
2202
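     /* execute all pending exclusive truncates recorded on the lock list;
      * must run outside of any journal handle, hence the LASSERT below */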
2203 void osd_process_truncates(struct list_head *list)
2204 {
2205         struct osd_access_lock *al;
2206
2207         LASSERT(journal_current_handle() == NULL);
2208
2209         list_for_each_entry(al, list, tl_list) {
2210                 if (al->tl_shared)
2211                         continue;
2212                 if (!al->tl_truncate)
2213                         continue;
2214                 osd_execute_truncate(al->tl_obj);
2215         }
2216 }