/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-ldiskfs/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 * OBD_FAIL_CHECK
 */
#include <obd_support.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>

static inline bool osd_use_page_cache(struct osd_device *d)
{
        /* do not use pagecache if write and read caching are disabled */
        if (d->od_writethrough_cache + d->od_read_cache == 0)
                return false;
        /* use pagecache by default */
        return true;
}

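/**
 * Prepare an osd_iobuf for a new bulk I/O of \a pages pages.
 *
 * Resets the per-request state (wait queue, counters, error) and, when the
 * preallocated page/block/lnb arrays are too small, regrows them in
 * power-of-two steps starting from 256 pages (1MB worth of 4K blocks).
 *
 * \param d      OSD device the I/O is against
 * \param iobuf  per-thread I/O descriptor to initialize
 * \param rw     0 for read, 1 for write
 * \param line   source line of the caller, recorded for debugging
 * \param pages  number of pages in the upcoming I/O
 *
 * \retval 0        on success
 * \retval -ENOMEM  if the internal arrays cannot be reallocated
 */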
static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = ktime_set(0, 0);
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_lnb_buf,
                       pages * sizeof(iobuf->dr_lnbs[0]));
        iobuf->dr_lnbs = iobuf->dr_lnb_buf.lb_buf;
        if (unlikely(iobuf->dr_lnbs == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)

static void osd_iobuf_add_page(struct osd_iobuf *iobuf,
                               struct niobuf_local *lnb)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages] = lnb->lnb_page;
        iobuf->dr_lnbs[iobuf->dr_npages] = lnb;
        iobuf->dr_npages++;
}

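/**
 * Record the fragment count and elapsed-time statistics for a completed
 * I/O described by \a iobuf, once the last in-flight bio has finished.
 */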
void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS + rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME + rw],
                                      ktime_to_ms(iobuf->dr_elapsed));
        }
}

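/*
 * Completion callback for the bios submitted by osd_do_bio().  May run in
 * IRQ context, so it only updates the iobuf state and wakes the waiter;
 * procfs statistics are recorded later, in osd_fini_iobuf().
 */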
#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
# ifdef HAVE_BI_STATUS
        int error = bio->bi_status;
# else
        int error = bio->bi_error;
# endif
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
        struct osd_iobuf *iobuf = bio->bi_private;
        int iter;
        struct bio_vec *bvl;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to <https://jira.whamcloud.com/>, along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps).  Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, "
#ifdef HAVE_BI_RW
                       "bi_rw: %lu, "
#else
                       "bi_opf: %u, "
#endif
                       "bi_vcnt: %d, bi_idx: %d, bi_size: %d, bi_end_io: %p, "
                       "bi_cnt: %d, bi_private: %p\n", bio->bi_next,
                        (unsigned long)bio->bi_flags,
#ifdef HAVE_BI_RW
                        bio->bi_rw,
#else
                        bio->bi_opf,
#endif
                        bio->bi_vcnt, bio_idx(bio),
                        bio_sectors(bio) << 9, bio->bi_end_io,
#ifdef HAVE_BI_CNT
                        atomic_read(&bio->bi_cnt),
#else
                        atomic_read(&bio->__bi_cnt),
#endif
                        bio->bi_private);
                return;
        }

        /* the check is outside of the loop for performance reasons -bzzz */
        if (!bio_data_dir(bio)) {
                bio_for_each_segment_all(bvl, bio, iter) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl_to_page(bvl));
                        LASSERT(PageLocked(bvl_to_page(bvl)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * set dr_elapsed before dr_numreqs drops to 0, otherwise the
         * service thread may see dr_numreqs already at zero while
         * dr_elapsed is not yet set, losing the timing data for this
         * I/O and triggering an assertion in a subsequent OSD call.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                ktime_t now = ktime_get();

                iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as they
         * complete, so the pool cannot be exhausted while IOs are competing.
         * bug 10076 */
        bio_put(bio);
}

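/**
 * Account one more bio for the I/O described by \a iobuf: bump the fragment
 * and request counters and tally the in-flight and I/O-size histograms.
 */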
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device    *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

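/*
 * Submit a bio for \a rw (0 = read, 1 = write), covering both kernels that
 * take the direction as a submit_bio() argument and those that encode it
 * in bi_opf.
 */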
static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
#else
        bio->bi_opf |= rw;
        submit_bio(bio);
#endif
}

static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}

/*
 * This function will change the data written, so it should only be
 * used when testing the data integrity feature.
 */
static void bio_integrity_fault_inject(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;
        void *kaddr;
        char *addr;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                kaddr = kmap(page);
                addr = kaddr;
                *addr = ~(*addr);
                kunmap(page);
                break;
        }
}

static int bio_dif_compare(__u16 *expected_guard_buf, void *bio_prot_buf,
                           unsigned int sectors, int tuple_size)
{
        __u16 *expected_guard;
        __u16 *bio_guard;
        int i;

        expected_guard = expected_guard_buf;
        for (i = 0; i < sectors; i++) {
                bio_guard = (__u16 *)bio_prot_buf;
                if (*bio_guard != *expected_guard) {
                        CERROR("unexpected guard tags on sector %d: "
                               "expected guard %u, bio guard %u, "
                               "sectors %u, tuple size %d\n",
                               i, *expected_guard, *bio_guard, sectors,
                               tuple_size);
                        return -EIO;
                }
                expected_guard++;
                bio_prot_buf += tuple_size;
        }
        return 0;
}

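/**
 * Compare the guard tags carried in the bio's integrity payload against
 * the guards received with the RPC (lnb_guards), starting at page
 * \a index of \a iobuf.
 *
 * \retval 0    if every segment's guards match
 * \retval -EIO on the first mismatch
 */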
static int osd_bio_integrity_compare(struct bio *bio, struct block_device *bdev,
                                     struct osd_iobuf *iobuf, int index)
{
        struct blk_integrity *bi = bdev_get_integrity(bdev);
        struct bio_integrity_payload *bip = bio->bi_integrity;
        struct niobuf_local *lnb;
        unsigned short sector_size = blk_integrity_interval(bi);
        void *bio_prot_buf = page_address(bip->bip_vec->bv_page) +
                bip->bip_vec->bv_offset;
        struct bio_vec *bv;
        sector_t sector = bio_start_sector(bio);
        unsigned int i, sectors, total;
        __u16 *expected_guard;
        int rc;

        total = 0;
        bio_for_each_segment_all(bv, bio, i) {
                lnb = iobuf->dr_lnbs[index];
                expected_guard = lnb->lnb_guards;
                sectors = bv->bv_len / sector_size;
                if (lnb->lnb_guard_rpc) {
                        rc = bio_dif_compare(expected_guard, bio_prot_buf,
                                             sectors, bi->tuple_size);
                        if (rc)
                                return rc;
                }

                sector += sectors;
                bio_prot_buf += sectors * bi->tuple_size;
                total += sectors * bi->tuple_size;
                LASSERT(total <= bip_size(bio->bi_integrity));
                index++;
        }
        return 0;
}

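/**
 * Prepare the integrity payload for \a bio and, on writes, optionally
 * verify the guard tags against the RPC (under OBD_FAIL_OST_INTEGRITY_CMP)
 * or corrupt the data for testing (\a fault_inject).  A no-op unless
 * \a integrity_enabled is set.
 */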
static int osd_bio_integrity_handle(struct osd_device *osd, struct bio *bio,
                                    struct osd_iobuf *iobuf,
                                    int start_page_idx, bool fault_inject,
                                    bool integrity_enabled)
{
        struct super_block *sb = osd_sb(osd);
        int rc;
#ifdef HAVE_BIO_INTEGRITY_PREP_FN
        integrity_gen_fn *generate_fn = NULL;
        integrity_vrfy_fn *verify_fn = NULL;
#endif

        ENTRY;

        if (!integrity_enabled)
                RETURN(0);

#ifdef HAVE_BIO_INTEGRITY_PREP_FN
        rc = osd_get_integrity_profile(osd, &generate_fn, &verify_fn);
        if (rc)
                RETURN(rc);

        rc = bio_integrity_prep_fn(bio, generate_fn, verify_fn);
#else
        rc = bio_integrity_prep(bio);
#endif
        if (rc)
                RETURN(rc);

        /* Verify and inject fault only when writing */
        if (iobuf->dr_rw == 1) {
                if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_CMP))) {
                        rc = osd_bio_integrity_compare(bio, sb->s_bdev, iobuf,
                                                       start_page_idx);
                        if (rc)
                                RETURN(rc);
                }

                if (unlikely(fault_inject))
                        bio_integrity_fault_inject(bio);
        }

        RETURN(0);
}

#ifdef HAVE_BIO_INTEGRITY_PREP_FN
#  ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_integrity_complete_routine(struct bio *bio)
{
#  else
static void dio_integrity_complete_routine(struct bio *bio, int error)
{
#  endif
        struct osd_bio_private *bio_private = bio->bi_private;

        bio->bi_private = bio_private->obp_iobuf;
#  ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
        dio_complete_routine(bio);
#  else
        dio_complete_routine(bio, error);
#  endif

        OBD_FREE_PTR(bio_private);
}
#endif

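/**
 * Set up the completion callback and private data for \a bio.  With
 * integrity enabled, an osd_bio_private is allocated to remember the
 * first page index so verification can locate the matching lnbs.
 */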
static int osd_bio_init(struct bio *bio, struct osd_iobuf *iobuf,
                        bool integrity_enabled, int start_page_idx,
                        struct osd_bio_private **pprivate)
{
#ifdef HAVE_BIO_INTEGRITY_PREP_FN
        struct osd_bio_private *bio_private;

        ENTRY;

        *pprivate = NULL;
        if (integrity_enabled) {
                OBD_ALLOC_GFP(bio_private, sizeof(*bio_private), GFP_NOIO);
                if (bio_private == NULL)
                        RETURN(-ENOMEM);
                bio->bi_end_io = dio_integrity_complete_routine;
                bio->bi_private = bio_private;
                bio_private->obp_start_page_idx = start_page_idx;
                bio_private->obp_iobuf = iobuf;
                *pprivate = bio_private;
        } else {
                bio->bi_end_io = dio_complete_routine;
                bio->bi_private = iobuf;
        }
        RETURN(0);
#else
        ENTRY;

        bio->bi_end_io = dio_complete_routine;
        bio->bi_private = iobuf;
        RETURN(0);
#endif
}

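/**
 * Build and submit bios for the pages and blocks collected in \a iobuf.
 *
 * Contiguous file blocks are coalesced into as few bios as possible;
 * holes (block 0) are legal only for reads and are zero-filled in place.
 * Reads are waited for before returning, while writes are submitted
 * asynchronously and waited for in osd_trans_stop().
 *
 * \retval 0 on success, negative errno on allocation or integrity failure
 */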
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page **pages = iobuf->dr_pages;
        int npages = iobuf->dr_npages;
        sector_t *blocks = iobuf->dr_blocks;
        int total_blocks = npages * blocks_per_page;
        struct super_block *sb = inode->i_sb;
        int sector_bits = sb->s_blocksize_bits - 9;
        unsigned int blocksize = sb->s_blocksize;
        struct block_device *bdev = sb->s_bdev;
        struct osd_bio_private *bio_private = NULL;
        struct bio *bio = NULL;
        int bio_start_page_idx;
        struct page *page;
        unsigned int page_offset;
        sector_t sector;
        int nblocks;
        int block_idx;
        int page_idx;
        int i;
        int rc = 0;
        bool fault_inject;
        bool integrity_enabled;
        DECLARE_PLUG(plug);
        ENTRY;

        fault_inject = OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
        LASSERT(iobuf->dr_npages == npages);

        integrity_enabled = bdev_integrity_enabled(bdev, iobuf->dr_rw);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = ktime_get();

        blk_start_plug(&plug);
        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q = bio_get_queue(bio);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       0, queue_max_hw_segments(q));
                                rc = osd_bio_integrity_handle(osd, bio,
                                        iobuf, bio_start_page_idx,
                                        fault_inject, integrity_enabled);
                                if (rc) {
                                        bio_put(bio);
                                        goto out;
                                }

                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        bio_start_page_idx = page_idx;
                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio_set_dev(bio, bdev);
                        bio_set_sector(bio, sector);
#ifdef HAVE_BI_RW
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
#else
                        bio->bi_opf = (iobuf->dr_rw == 0) ? READ : WRITE;
#endif
                        rc = osd_bio_init(bio, iobuf, integrity_enabled,
                                          bio_start_page_idx, &bio_private);
                        if (rc) {
                                bio_put(bio);
                                goto out;
                        }

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                rc = osd_bio_integrity_handle(osd, bio, iobuf,
                                              bio_start_page_idx,
                                              fault_inject,
                                              integrity_enabled);
                if (rc) {
                        bio_put(bio);
                        goto out;
                }

                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

out:
        blk_finish_plug(&plug);

        /* to achieve better IO throughput, we don't wait for write
         * completion here.  Instead we proceed with the transaction
         * commit in parallel and wait for IO completion once the
         * transaction is stopped.  See osd_trans_stop() for more
         * details -bzzz */
        if (iobuf->dr_rw == 0 || fault_inject) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0) {
                rc = iobuf->dr_error;
        } else {
                if (bio_private)
                        OBD_FREE_PTR(bio_private);
        }

        RETURN(rc);
}

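/**
 * Split the byte range [\a offset, \a offset + \a len) into per-page
 * niobuf_local entries, filling in each entry's file offset, in-page
 * offset and length; \a *nrpages returns how many entries were filled.
 */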
static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb)
{
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_SIZE - 1);
                int plen = PAGE_SIZE - poff;

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;
                lnb->lnb_guard_rpc = 0;
                lnb->lnb_guard_disk = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(0);
}

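/**
 * Get a page for I/O at \a offset: from the inode's page cache when
 * caching is enabled, otherwise from the per-thread pool of private
 * pages (marked with PG_private_2 so osd_bufs_put() can tell them apart).
 */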
static struct page *osd_get_page(const struct lu_env *env, struct dt_object *dt,
                                 loff_t offset, gfp_t gfp_mask)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page *page;
        int cur = oti->oti_dio_pages_used;

        LASSERT(inode);

        if (osd_use_page_cache(d)) {
                page = find_or_create_page(inode->i_mapping,
                                           offset >> PAGE_SHIFT,
                                           gfp_mask);

                if (likely(page))
                        LASSERT(!test_bit(PG_private_2, &page->flags));
                else
                        lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
        } else {

                LASSERT(oti->oti_dio_pages);

                if (unlikely(!oti->oti_dio_pages[cur])) {
                        LASSERT(cur < PTLRPC_MAX_BRW_PAGES);
                        page = alloc_page(gfp_mask);
                        if (!page)
                                return NULL;
                        oti->oti_dio_pages[cur] = page;
                }

                page = oti->oti_dio_pages[cur];
                LASSERT(!test_bit(PG_private_2, &page->flags));
                set_bit(PG_private_2, &page->flags);
                oti->oti_dio_pages_used++;

                LASSERT(!PageLocked(page));
                lock_page(page);

                LASSERT(!page->mapping);
                LASSERT(!PageWriteback(page));
                ClearPageUptodate(page);

                page->index = offset >> PAGE_SHIFT;
        }

        return page;
}

/*
 * The following locks are involved:
 * journal_start
 * i_mutex
 * page lock
 *
 * osd write path:
 *  - lock page(s)
 *  - journal_start
 *  - truncate_sem
 *
 * ext4 vmtruncate:
 *  - lock pages, unlock
 *  - journal_start
 *  - lock partial page
 *  - i_data_sem
 *
 */

/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param lnb           array of pages undergoing IO
 * \param npages        number of pages in \a lnb
 *
 * \retval 0            always
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct pagevec pvec;
        int i;

#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
        pagevec_init(&pvec);
#else
        pagevec_init(&pvec, 0);
#endif

        for (i = 0; i < npages; i++) {
                struct page *page = lnb[i].lnb_page;

                if (page == NULL)
                        continue;
                LASSERT(PageLocked(page));

                /* if the page isn't cached, then reset uptodate
                 * to prevent reuse */
                if (test_bit(PG_private_2, &page->flags)) {
                        clear_bit(PG_private_2, &page->flags);
                        ClearPageUptodate(page);
                        unlock_page(page);
                        oti->oti_dio_pages_used--;
                } else {
                        unlock_page(page);
                        if (pagevec_add(&pvec, page) == 0)
                                pagevec_release(&pvec);
                }
                dt_object_put(env, dt);

                lnb[i].lnb_page = NULL;
        }

        LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);

        /* Release any partial pagevec */
        pagevec_release(&pvec);

        RETURN(0);
}

/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for same region,
 * but partial-page truncate can leave dirty pages in the cache for ldiskfs.
 * It's possible the writeout on such a page is in progress when we access
 * it. It's also possible that during this writeout we put new (partial) data
 * into the page, but won't be able to proceed in filter_commitrw_write().
 * Therefore, just wait for writeout completion as it should be rare enough.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param pos           byte offset of IO start
 * \param len           number of bytes of IO
 * \param lnb           array of extents undergoing IO
 * \param rw            read or write operation, and other flags
 *
 * \retval pages        (zero or more) loaded successfully
 * \retval -ENOMEM      on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        enum dt_bufs_type rw)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_object *obj = osd_dt_obj(dt);
        int npages, i, rc = 0;
        gfp_t gfp_mask;

        LASSERT(obj->oo_inode);

        if (!osd_use_page_cache(osd_obj2dev(obj))) {
                if (unlikely(!oti->oti_dio_pages)) {
                        OBD_ALLOC(oti->oti_dio_pages,
                                  sizeof(struct page *) * PTLRPC_MAX_BRW_PAGES);
                        if (!oti->oti_dio_pages)
                                return -ENOMEM;
                }
        }

        osd_map_remote_to_local(pos, len, &npages, lnb);

        /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
        gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
                                             GFP_HIGHUSER;
        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(env, dt, lnb->lnb_file_offset,
                                             gfp_mask);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                wait_on_page_writeback(lnb->lnb_page);
                BUG_ON(PageWriteback(lnb->lnb_page));

                lu_object_get(&dt->do_lu);
        }

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}

#ifndef HAVE_LDISKFS_MAP_BLOCKS

#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif

struct bpointers {
        sector_t *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

static long ldiskfs_ext_find_goal(struct inode *inode,
                                  struct ldiskfs_ext_path *path,
                                  unsigned long block, int *aflags)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ldiskfs_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ldiskfs_ext_pblock(ex) +
                                (block - le32_to_cpu(ex->ee_block));

                /* the index looks empty; try to find an allocation
                 * starting from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}

static unsigned long new_blocks(handle_t *handle, struct inode *inode,
                                struct ldiskfs_ext_path *path,
                                unsigned long block, unsigned long *count,
                                int *err)
{
        struct ldiskfs_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbouring allocated blocks */
        ar.lleft = block;
        *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = LDISKFS_MB_HINT_DATA;
        pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}

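/*
 * Callback for ldiskfs_ext_walk_space(): map already-allocated extents into
 * bp->blocks and, when bp->create is set, allocate and insert new extents
 * for unmapped ranges under i_data_sem within a fresh journal handle.
 */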
static int ldiskfs_ext_new_extent_cb(struct inode *inode,
                                     struct ldiskfs_ext_path *path,
                                     struct ldiskfs_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                     struct ldiskfs_extent *ex,
#endif
                                     void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct ldiskfs_extent nex;
        unsigned long pblock = 0;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
        if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
#endif
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = LDISKFS_I(inode)->i_ext_generation;
        count = ldiskfs_ext_calc_credits_for_insert(inode, path);

        handle = osd_journal_start(inode, LDISKFS_HT_MISC,
                                   count + LDISKFS_ALLOC_NEEDED + 1);
        if (IS_ERR(handle)) {
                return PTR_ERR(handle);
        }

        if (tgen != LDISKFS_I(inode)->i_ext_generation) {
                /* the tree has changed, so the path may be invalid now */
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback is not
         * protected by i_data_sem as a whole, so we patch it to store the
         * generation in the path and verify here that the tree hasn't
         * changed */
        down_write((&LDISKFS_I(inode)->i_data_sem));

        /* validate the extent; make sure the extent tree has not changed */
        if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&LDISKFS_I(inode)->i_data_sem);
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        BUG_ON(count > cex->ec_len);

        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ldiskfs_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
        if (err) {
                /* free the data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it on every free() */
                ldiskfs_discard_preallocations(inode);
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ldiskfs_free_blocks(handle, inode, NULL,
                                    ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#else
                ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * By storing the length of the extent we actually inserted, we
         * ask ldiskfs_ext_walk_space() to continue scanning after that
         * block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ldiskfs_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&LDISKFS_I(inode)->i_data_sem));
        ldiskfs_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        CERROR("current extent: %u/%u/%llu %d\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start,
                                cex->ec_type);
#else
                        CERROR("current extent: %u/%u/%llu\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start);
#endif
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (pblock != 0) {
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                unmap_underlying_metadata(inode->i_sb->s_bdev,
                                                          *(bp->blocks));
#else
                                clean_bdev_aliases(inode->i_sb->s_bdev,
                                                   *(bp->blocks), 1);
#endif
                        }
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

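/**
 * Map \a clen pages starting at page \a index to disk blocks, walking the
 * extent tree and allocating blocks when \a create is set; the resulting
 * block numbers are stored in \a blocks (one entry per filesystem block).
 */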
static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index,
                                   int clen, sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct bpointers bp;
        int err;

        if (index + clen >= inode->i_sb->s_maxbytes >> PAGE_SHIFT)
                return -EFBIG;

        bp.blocks = blocks;
        bp.start = index * blocks_per_page;
        bp.init_num = bp.num = clen * blocks_per_page;
        bp.create = create;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               bp.start, bp.start + bp.num - 1, (unsigned)inode->i_ino);

        err = ldiskfs_ext_walk_space(inode, bp.start, bp.num,
                                     ldiskfs_ext_new_extent_cb, &bp);
        ldiskfs_ext_invalidate_cache(inode);

        return err;
}

static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode,
                                          struct page **page, int pages,
                                          sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        pgoff_t bitmap_max_page_index;
        sector_t *b;
        int rc = 0, i;

        bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
                                PAGE_SHIFT;
        for (i = 0, b = blocks; i < pages; i++, page++) {
                if ((*page)->index + 1 >= bitmap_max_page_index) {
                        rc = -EFBIG;
                        break;
                }
                rc = ldiskfs_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %llu create %d: rc %d\n",
                               inode->i_ino,
                               (unsigned long long)*b, create, rc);
                        break;
                }
                b += blocks_per_page;
        }
        return rc;
}

static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode,
                                           struct page **page,
                                           int pages, sector_t *blocks,
                                           int create)
{
        int rc = 0, i = 0, clen = 0;
        struct page *fp = NULL;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* the pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += clen * (PAGE_SIZE >> inode->i_blkbits);
        }

        if (fp)
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);

cleanup:
        return rc;
}

static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int rc;

        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);

        return rc;
}
#else
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;
        handle_t *handle = NULL;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        if (create) {
                create = LDISKFS_GET_BLOCKS_CREATE;
                handle = ldiskfs_journal_current_handle();
                LASSERT(handle != NULL);
                rc = osd_attach_jinode(inode);
                if (rc)
                        return rc;
        }
        /* the pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                long blen, total = 0;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
cont_map:
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;
                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                } else {
                                        *(blocks + total) = map.m_pblk + c;
                                        /* unmap any possible underlying
                                         * metadata from the block device
                                         * mapping.  bug 6998. */
                                        if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                            create)
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                                unmap_underlying_metadata(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c);
#else
                                                clean_bdev_aliases(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c, 1);
#endif
                                }
                        }
                        rc = 0;
                }
                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        return rc;
}
#endif /* HAVE_LDISKFS_MAP_BLOCKS */

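/**
 * Prepare pages for a write: drop uncached pages from the page cache,
 * clear the uptodate bit until the bulk completes, and read in any
 * partial pages that lie within the current file size (partial pages
 * beyond EOF are simply zeroed around the incoming data).
 */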
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        ktime_t start;
        ktime_t end;
        s64 timediff;
        ssize_t                 isize;
        __s64                   maxidx;
        int                     rc = 0;
        int                     i;
        int                     cache = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        start = ktime_get();
        for (i = 0; i < npages; i++) {

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);

                /*
                 * until the transaction commits, the content of the page
                 * is undefined; we'll set it uptodate once the bulk is
                 * done.  Otherwise subsequent reads could access
                 * non-stable data.
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_SIZE)
                        continue;

                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, &lnb[i]);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}

struct osd_fextent {
        sector_t        start;
        sector_t        end;
        unsigned int    mapped:1;
};

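/**
 * Check whether the block containing \a offset is already mapped to disk,
 * using FIEMAP and caching the answer for the surrounding extent in
 * \a cached_extent so that per-page checks over one request stay cheap.
 */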
1402 static int osd_is_mapped(struct dt_object *dt, __u64 offset,
1403                          struct osd_fextent *cached_extent)
1404 {
1405         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1406         sector_t block = offset >> inode->i_blkbits;
1407         sector_t start;
1408         struct fiemap_extent_info fei = { 0 };
1409         struct fiemap_extent fe = { 0 };
1410         mm_segment_t saved_fs;
1411         int rc;
1412
1413         if (block >= cached_extent->start && block < cached_extent->end)
1414                 return cached_extent->mapped;
1415
1416         if (i_size_read(inode) == 0)
1417                 return 0;
1418
1419         /* Beyond EOF, must not be mapped */
1420         if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
1421                 return 0;
1422
1423         fei.fi_extents_max = 1;
1424         fei.fi_extents_start = &fe;
1425
1426         saved_fs = get_fs();
1427         set_fs(get_ds());
1428         rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET - offset);
1429         set_fs(saved_fs);
1430         if (rc != 0)
1431                 return 0;
1432
1433         start = fe.fe_logical >> inode->i_blkbits;
1434
1435         if (start > block) {
1436                 cached_extent->start = block;
1437                 cached_extent->end = start;
1438                 cached_extent->mapped = 0;
1439         } else {
1440                 cached_extent->start = start;
1441                 cached_extent->end = (fe.fe_logical + fe.fe_length) >>
1442                                       inode->i_blkbits;
1443                 cached_extent->mapped = 1;
1444         }
1445
1446         return cached_extent->mapped;
1447 }
1448
1449 static int osd_declare_write_commit(const struct lu_env *env,
1450                                     struct dt_object *dt,
1451                                     struct niobuf_local *lnb, int npages,
1452                                     struct thandle *handle)
1453 {
1454         const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1455         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1456         struct osd_thandle      *oh;
1457         int                     extents = 1;
1458         int                     depth;
1459         int                     i;
1460         int                     newblocks;
1461         int                     rc = 0;
1462         int                     flags = 0;
1463         int                     credits = 0;
1464         long long               quota_space = 0;
1465         struct osd_fextent      extent = { 0 };
1466         enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
1467         ENTRY;
1468
1469         LASSERT(handle != NULL);
1470         oh = container_of0(handle, struct osd_thandle, ot_super);
1471         LASSERT(oh->ot_handle == NULL);
1472
1473         newblocks = npages;
1474
1475         /* calculate number of extents (probably better to pass nb) */
1476         for (i = 0; i < npages; i++) {
1477                 if (i && lnb[i].lnb_file_offset !=
1478                     lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
1479                         extents++;
1480
1481                 if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
1482                         lnb[i].lnb_flags |= OBD_BRW_MAPPED;
1483                 else
1484                         quota_space += PAGE_SIZE;
1485
1486                 /* ignore quota for the whole request if any page is from
1487                  * the client cache or written by root.
1488                  *
1489                  * XXX once we drop 1.8 client support, the check for
1490                  * whether a page is from the cache can be simplified to:
1491                  * !(lnb[i].flags & OBD_BRW_SYNC)
1492                  *
1493                  * XXX we could handle this on a per-lnb basis as done by
1494                  * grant. */
1495                 if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
1496                     (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
1497                     OBD_BRW_FROM_GRANT)
1498                         declare_flags |= OSD_QID_FORCE;
1499         }
1500
1501         /*
1502          * each extent can go into a new leaf causing a split;
1503          * 5 is the max tree depth: inode + 4 index blocks.
1504          * with blockmaps, depth is 3 at most
1505          */
1506         if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
1507                 /*
1508                  * many concurrent threads may grow the tree by the time
1509                  * our transaction starts, so consider 2 the minimum depth
1510                  */
1511                 depth = ext_depth(inode);
1512                 depth = max(depth, 1) + 1;
1513                 newblocks += depth;
1514                 credits++; /* inode */
1515                 credits += depth * 2 * extents;
1516         } else {
1517                 depth = 3;
1518                 newblocks += depth;
1519                 credits++; /* inode */
1520                 credits += depth * extents;
1521         }
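        /*
         * For example, a contiguous 256-page (1MB, assuming 4KB pages)
         * write to a depth-1 extent tree: extents = 1 and
         * depth = max(1, 1) + 1 = 2, so newblocks = 256 + 2 and
         * credits = 1 + 2 * 2 * 1 = 5 before the per-block bitmap/gd
         * credits added below.
         */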
1522
1523         /* quota space for metadata blocks */
1524         quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));
1525
1526         /* quota space should be reported in 1K blocks */
1527         quota_space = toqb(quota_space);
1528
1529         /* each new block can go in a different group (bitmap + gd) */
1530
1531         /* we can't dirty more bitmap blocks than exist */
1532         if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
1533                 credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
1534         else
1535                 credits += newblocks;
1536
1537         /* we can't dirty more gd blocks than exist */
1538         if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
1539                 credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
1540         else
1541                 credits += newblocks;
1542
1543         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1544
1545         /* make sure the over-quota flags are not set */
1546         lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;
1547
1548         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1549                                    i_projid_read(inode), quota_space, oh,
1550                                    osd_dt_obj(dt), &flags, declare_flags);
1551
1552         /* we only need to store the over-quota flags in the first lnb
1553          * for now; once we support multi-object BRW, this code will
1554          * need to be revised. */
1555         if (flags & QUOTA_FL_OVER_USRQUOTA)
1556                 lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
1557         if (flags & QUOTA_FL_OVER_GRPQUOTA)
1558                 lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
1559         if (flags & QUOTA_FL_OVER_PRJQUOTA)
1560                 lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;
1561
1562         if (rc == 0)
1563                 rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);
1564
1565         RETURN(rc);
1566 }
1567
1568 /* Write prepared pages to disk within the declared transaction */
1569 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
1570                             struct niobuf_local *lnb, int npages,
1571                             struct thandle *thandle)
1572 {
1573         struct osd_thread_info *oti = osd_oti_get(env);
1574         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1575         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1576         struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
1577         loff_t isize;
1578         int rc = 0, i;
1579
1580         LASSERT(inode);
1581
1582         rc = osd_init_iobuf(osd, iobuf, 1, npages);
1583         if (unlikely(rc != 0))
1584                 RETURN(rc);
1585
1586         isize = i_size_read(inode);
1587         ll_vfs_dq_init(inode);
1588
1589         for (i = 0; i < npages; i++) {
1590                 if (lnb[i].lnb_rc == -ENOSPC &&
1591                     (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
1592                         /* Allow the write to proceed if overwriting an
1593                          * existing block */
1594                         lnb[i].lnb_rc = 0;
1595                 }
1596
1597                 if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
1598                         CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
1599                                lnb[i].lnb_rc);
1600                         LASSERT(lnb[i].lnb_page);
1601                         generic_error_remove_page(inode->i_mapping,
1602                                                   lnb[i].lnb_page);
1603                         continue;
1604                 }
1605
1606                 LASSERT(PageLocked(lnb[i].lnb_page));
1607                 LASSERT(!PageWriteback(lnb[i].lnb_page));
1608
1609                 if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
1610                         isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;
1611
1612                 /*
1613                  * Since write and truncate are serialized by oo_sem, even
1614                  * partial-page truncate should not leave dirty pages in the
1615                  * page cache.
1616                  */
1617                 LASSERT(!PageDirty(lnb[i].lnb_page));
1618
1619                 SetPageUptodate(lnb[i].lnb_page);
1620
1621                 osd_iobuf_add_page(iobuf, &lnb[i]);
1622         }
1623
1624         osd_trans_exec_op(env, thandle, OSD_OT_WRITE);
1625
1626         if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
1627                 rc = -ENOSPC;
1628         } else if (iobuf->dr_npages > 0) {
1629                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1630                                                  iobuf->dr_npages,
1631                                                  iobuf->dr_blocks, 1);
1632         } else {
1633                 /* no pages to write, no transno is needed */
1634                 thandle->th_local = 1;
1635         }
1636
1637         if (likely(rc == 0)) {
1638                 spin_lock(&inode->i_lock);
1639                 if (isize > i_size_read(inode)) {
1640                         i_size_write(inode, isize);
1641                         LDISKFS_I(inode)->i_disksize = isize;
1642                         spin_unlock(&inode->i_lock);
1643                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1644                 } else {
1645                         spin_unlock(&inode->i_lock);
1646                 }
1647
1648                 rc = osd_do_bio(osd, inode, iobuf);
1649                 /* we don't do stats here as in the read path because
1650                  * write is async: we'll do this in osd_bufs_put() */
1651         } else {
1652                 osd_fini_iobuf(osd, iobuf);
1653         }
1654
1655         osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
1656
1657         if (unlikely(rc != 0)) {
1658                 /* if write fails, we should drop pages from the cache */
1659                 for (i = 0; i < npages; i++) {
1660                         if (lnb[i].lnb_page == NULL)
1661                                 continue;
1662                         LASSERT(PageLocked(lnb[i].lnb_page));
1663                         generic_error_remove_page(inode->i_mapping,
1664                                                   lnb[i].lnb_page);
1665                 }
1666         }
1667
1668         RETURN(rc);
1669 }
1670
1671 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1672                          struct niobuf_local *lnb, int npages)
1673 {
1674         struct osd_thread_info *oti = osd_oti_get(env);
1675         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1676         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1677         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1678         int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
1679         ktime_t start, end;
1680         s64 timediff;
1681         loff_t isize;
1682
1683         LASSERT(inode);
1684
1685         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1686         if (unlikely(rc != 0))
1687                 RETURN(rc);
1688
1689         isize = i_size_read(inode);
1690
1691         if (osd->od_read_cache)
1692                 cache = 1;
1693         if (isize > osd->od_readcache_max_filesize)
1694                 cache = 0;
1695
1696         start = ktime_get();
1697         for (i = 0; i < npages; i++) {
1698
1699                 if (isize <= lnb[i].lnb_file_offset)
1700                         /* If there's no more data, abort early;
1701                          * lnb_rc remains 0, so it's easy to detect later. */
1702                         break;
1703
1704                 if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len)
1705                         lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
1706                 else
1707                         lnb[i].lnb_rc = lnb[i].lnb_len;
1708
1709                 /* Bypass disk read if fail_loc is set properly */
1710                 if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
1711                         SetPageUptodate(lnb[i].lnb_page);
1712
1713                 if (PageUptodate(lnb[i].lnb_page)) {
1714                         cache_hits++;
1715                 } else {
1716                         cache_misses++;
1717                         osd_iobuf_add_page(iobuf, &lnb[i]);
1718                 }
1719
1720                 if (cache == 0)
1721                         generic_error_remove_page(inode->i_mapping,
1722                                                   lnb[i].lnb_page);
1723         }
1724         end = ktime_get();
1725         timediff = ktime_us_delta(end, start);
1726         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1727
1728         if (cache_hits != 0)
1729                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1730                                     cache_hits);
1731         if (cache_misses != 0)
1732                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1733                                     cache_misses);
1734         if (cache_hits + cache_misses != 0)
1735                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1736                                     cache_hits + cache_misses);
1737
1738         if (iobuf->dr_npages) {
1739                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1740                                                  iobuf->dr_npages,
1741                                                  iobuf->dr_blocks, 0);
1742                 if (likely(rc == 0))
1743                         rc = osd_do_bio(osd, inode, iobuf);
1744                 /* IO stats will be done in osd_bufs_put() */
1745         }
1746
1747         RETURN(rc);
1748 }
1749
1750 /*
1751  * XXX: Another layering violation for now.
1752  *
1753  * We don't want to use the generic ->f_op->read/write methods here,
1754  * because generic file write
1755  *         - serializes on ->i_sem, and
1756  *
1757  *         - does a lot of extra work like balance_dirty_pages(),
1758  *
1759  * which doesn't work for globally shared files like /last_rcvd.
1760  */
1761 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1762 {
1763         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1764
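        /*
         * fast symlinks keep the target inline in the inode body
         * (i_data), so it can be copied out without reading any
         * data blocks
         */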
1765         memcpy(buffer, (char *)ei->i_data, buflen);
1766
1767         return buflen;
1768 }
1769
1770 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1771 {
1772         struct buffer_head *bh;
1773         unsigned long block;
1774         int osize;
1775         int blocksize;
1776         int csize;
1777         int boffs;
1778
1779         /* prevent reading after eof */
1780         spin_lock(&inode->i_lock);
1781         if (i_size_read(inode) < *offs + size) {
1782                 loff_t diff = i_size_read(inode) - *offs;
1783                 spin_unlock(&inode->i_lock);
1784                 if (diff < 0) {
1785                         CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
1786                                i_size_read(inode), *offs);
1787                         return -EBADR;
1788                 } else if (diff == 0) {
1789                         return 0;
1790                 } else {
1791                         size = diff;
1792                 }
1793         } else {
1794                 spin_unlock(&inode->i_lock);
1795         }
1796
1797         blocksize = 1 << inode->i_blkbits;
1798         osize = size;
1799         while (size > 0) {
1800                 block = *offs >> inode->i_blkbits;
1801                 boffs = *offs & (blocksize - 1);
1802                 csize = min(blocksize - boffs, size);
1803                 bh = __ldiskfs_bread(NULL, inode, block, 0);
1804                 if (IS_ERR(bh)) {
1805                         CERROR("%s: can't read %u@%llu on ino %lu: "
1806                                "rc = %ld\n", osd_ino2name(inode),
1807                                csize, *offs, inode->i_ino,
1808                                PTR_ERR(bh));
1809                         return PTR_ERR(bh);
1810                 }
1811
1812                 if (bh != NULL) {
1813                         memcpy(buf, bh->b_data + boffs, csize);
1814                         brelse(bh);
1815                 } else {
1816                         memset(buf, 0, csize);
1817                 }
1818
1819                 *offs += csize;
1820                 buf += csize;
1821                 size -= csize;
1822         }
1823         return osize;
1824 }
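/*
 * A worked example of the loop above, assuming a 4KB block size:
 * reading size = 8192 at *offs = 6144 takes three iterations: 2048
 * bytes from block 1 (boffs = 2048), all 4096 bytes of block 2, and
 * the first 2048 bytes of block 3. The function then returns
 * osize = 8192.
 */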
1825
1826 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1827                         struct lu_buf *buf, loff_t *pos)
1828 {
1829         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1830         int           rc;
1831
1832         /* Read small symlink from inode body as we need to maintain correct
1833          * on-disk symlinks for ldiskfs.
1834          */
1835         if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1836             (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1837                 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1838         else
1839                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1840
1841         return rc;
1842 }
1843
1844 static inline int osd_extents_enabled(struct super_block *sb,
1845                                       struct inode *inode)
1846 {
1847         if (inode != NULL) {
1848                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1849                         return 1;
1850         } else if (ldiskfs_has_feature_extents(sb)) {
1851                 return 1;
1852         }
1853         return 0;
1854 }
1855
1856 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1857                            const loff_t size, const loff_t pos,
1858                            const int blocks)
1859 {
1860         int credits, bits, bs, i;
1861
1862         bits = sb->s_blocksize_bits;
1863         bs = 1 << bits;
1864
1865         /* legacy blockmap: 3 levels * 3 (bitmap, gd, itself);
1866          * we do not expect blockmaps on large files,
1867          * so let's shrink it to 2 levels (4GB files) */
1868 
1869         /* this is the default reservation: 2 levels */
1870         credits = (blocks + 2) * 3;
1871
1872         /* actual offset is unknown, hard to optimize */
1873         if (pos == -1)
1874                 return credits;
1875
1876         /* now check a few specific cases to optimize */
1877         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1878                 /* no indirects */
1879                 credits = blocks;
1880                 /* if blocks are not yet allocated: bitmap + gd per block */
1881                 if (inode == NULL) {
1882                         credits += blocks * 2;
1883                         return credits;
1884                 }
1885                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1886                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1887                         if (LDISKFS_I(inode)->i_data[i] == 0)
1888                                 credits += 2;
1889                 }
1890         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1891                 /* single indirect */
1892                 credits = blocks * 3;
1893                 if (inode == NULL ||
1894                     LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1895                         credits += 3;
1896                 else
1897                         /* The indirect block may be modified. */
1898                         credits += 1;
1899         }
1900
1901         return credits;
1902 }
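/*
 * A worked example for the function above, assuming a 4KB block size:
 * a single-block write at pos = 0 with no inode yet hits the
 * no-indirect case, so credits = blocks = 1 plus blocks * 2 = 2 for
 * the bitmap and group descriptor, i.e. 3 credits in total.
 */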
1903
1904 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1905                                  const struct lu_buf *buf, loff_t _pos,
1906                                  struct thandle *handle)
1907 {
1908         struct osd_object  *obj  = osd_dt_obj(dt);
1909         struct inode       *inode = obj->oo_inode;
1910         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1911         struct osd_thandle *oh;
1912         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1913         int                 bits, bs;
1914         int                 depth, size;
1915         loff_t              pos;
1916         ENTRY;
1917
1918         LASSERT(buf != NULL);
1919         LASSERT(handle != NULL);
1920
1921         oh = container_of0(handle, struct osd_thandle, ot_super);
1922         LASSERT(oh->ot_handle == NULL);
1923
1924         size = buf->lb_len;
1925         bits = sb->s_blocksize_bits;
1926         bs = 1 << bits;
1927
1928         if (_pos == -1) {
1929                 /* if this is an append, then we
1930                  * should expect a cross-block record */
1931                 pos = 0;
1932         } else {
1933                 pos = _pos;
1934         }
1935
1936         /* blocks to modify */
1937         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1938         LASSERT(blocks > 0);
1939
1940         if (inode != NULL && _pos != -1) {
1941                 /* object size in blocks */
1942                 est = (i_size_read(inode) + bs - 1) >> bits;
1943                 allocated = inode->i_blocks >> (bits - 9);
1944                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1945                         /* looks like an overwrite, no need to modify tree */
1946                         credits = blocks;
1947                         /* no need to modify i_size */
1948                         goto out;
1949                 }
1950         }
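        /*
         * For instance (assuming 4KB blocks), rewriting 8KB at offset 0
         * of a fully allocated 1MB file gives blocks = 2, est = 256 and
         * allocated = 256, so the shortcut above declares just
         * credits = blocks = 2 with no tree or i_size updates.
         */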
1951
1952         if (osd_extents_enabled(sb, inode)) {
1953                 /*
1954                  * many concurrent threads may grow the tree by the time
1955                  * our transaction starts, so consider 2 the minimum depth.
1956                  * for every level we may need to allocate a new block
1957                  * and take some entries from the old one: 3 blocks to
1958                  * allocate (bitmap, gd, itself) + the old block = 4 per
1959                  * level.
1960                  */
1961                 depth = inode != NULL ? ext_depth(inode) : 0;
1962                 depth = max(depth, 1) + 1;
1963                 credits = depth;
1964                 /* if not an append, then a split may need to modify
1965                  * existing blocks, moving entries into the new ones */
1966                 if (_pos != -1)
1967                         credits += depth;
1968                 /* blocks to store data: bitmap, gd, itself */
1969                 credits += blocks * 3;
1970         } else {
1971                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1972         }
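        /*
         * For example (assuming 4KB blocks): a one-block append
         * (_pos == -1) to an existing depth-0 extent file gives
         * depth = 2, credits = 2 + 1 * 3 = 5, plus one more below for
         * the inode itself.
         */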
1973         /* if the inode is created as part of the transaction,
1974          * then it's already counted by the creation method */
1975         if (inode != NULL)
1976                 credits++;
1977
1978 out:
1979
1980         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1981
1982         /* dt_declare_write() is usually called for system objects, such
1983          * as llog or last_rcvd files. We needn't enforce quota on those
1984          * objects, so always pass 0 for lqi_space. */
1985         if (inode != NULL)
1986                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1987                                            i_gid_read(inode),
1988                                            i_projid_read(inode), 0,
1989                                            oh, obj, NULL, OSD_QID_BLK);
1990
1991         if (rc == 0)
1992                 rc = osd_trunc_lock(obj, oh, true);
1993
1994         RETURN(rc);
1995 }
1996
1997 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1998 {
1999         /* LU-2634: clear the extent format for fast symlink */
2000         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
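        /*
         * i_data is reused below to hold the link target inline, so it
         * must not be interpreted as an extent tree root.
         */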
2001
2002         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
2003         spin_lock(&inode->i_lock);
2004         LDISKFS_I(inode)->i_disksize = buflen;
2005         i_size_write(inode, buflen);
2006         spin_unlock(&inode->i_lock);
2007         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2008
2009         return 0;
2010 }
2011
2012 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
2013                              int write_NUL, loff_t *offs, handle_t *handle)
2014 {
2015         struct buffer_head *bh        = NULL;
2016         loff_t              offset    = *offs;
2017         loff_t              new_size  = i_size_read(inode);
2018         unsigned long       block;
2019         int                 blocksize = 1 << inode->i_blkbits;
2020         int                 err = 0;
2021         int                 size;
2022         int                 boffs;
2023         int                 dirty_inode = 0;
2024
2025         if (write_NUL) {
2026                 /*
2027                  * a long symlink write does not count the NUL terminator
2028                  * in bufsize; we write it anyway, and the inode's file
2029                  * size does not count the NUL terminator either.
2030                  */
2031                 ((char *)buf)[bufsize] = '\0';
2032                 ++bufsize;
2033         }
2034
2035         while (bufsize > 0) {
2036                 int credits = handle->h_buffer_credits;
2037
2038                 if (bh)
2039                         brelse(bh);
2040
2041                 block = offset >> inode->i_blkbits;
2042                 boffs = offset & (blocksize - 1);
2043                 size = min(blocksize - boffs, bufsize);
2044                 bh = __ldiskfs_bread(handle, inode, block, 1);
2045                 if (IS_ERR_OR_NULL(bh)) {
2046                         if (bh == NULL) {
2047                                 err = -EIO;
2048                         } else {
2049                                 err = PTR_ERR(bh);
2050                                 bh = NULL;
2051                         }
2052
2053                         CERROR("%s: error reading offset %llu (block %lu, "
2054                                "size %d, offs %llu), credits %d/%d: rc = %d\n",
2055                                inode->i_sb->s_id, offset, block, bufsize, *offs,
2056                                credits, handle->h_buffer_credits, err);
2057                         break;
2058                 }
2059
2060                 err = ldiskfs_journal_get_write_access(handle, bh);
2061                 if (err) {
2062                         CERROR("journal_get_write_access() returned error %d\n",
2063                                err);
2064                         break;
2065                 }
2066                 LASSERTF(boffs + size <= bh->b_size,
2067                          "boffs %d size %d bh->b_size %lu\n",
2068                          boffs, size, (unsigned long)bh->b_size);
2069                 memcpy(bh->b_data + boffs, buf, size);
2070                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2071                 if (err)
2072                         break;
2073
2074                 if (offset + size > new_size)
2075                         new_size = offset + size;
2076                 offset += size;
2077                 bufsize -= size;
2078                 buf += size;
2079         }
2080         if (bh)
2081                 brelse(bh);
2082
2083         if (write_NUL)
2084                 --new_size;
2085         /* correct in-core and on-disk sizes */
2086         if (new_size > i_size_read(inode)) {
2087                 spin_lock(&inode->i_lock);
2088                 if (new_size > i_size_read(inode))
2089                         i_size_write(inode, new_size);
2090                 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
2091                         LDISKFS_I(inode)->i_disksize = i_size_read(inode);
2092                         dirty_inode = 1;
2093                 }
2094                 spin_unlock(&inode->i_lock);
2095                 if (dirty_inode)
2096                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2097         }
2098
2099         if (err == 0)
2100                 *offs = offset;
2101         return err;
2102 }
2103
2104 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
2105                          const struct lu_buf *buf, loff_t *pos,
2106                          struct thandle *handle)
2107 {
2108         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
2109         struct osd_thandle      *oh;
2110         ssize_t                 result;
2111         int                     is_link;
2112
2113         LASSERT(dt_object_exists(dt));
2114
2115         LASSERT(handle != NULL);
2116         LASSERT(inode != NULL);
2117         ll_vfs_dq_init(inode);
2118
2119         /* XXX: don't check: one declared chunk can be used many times */
2120         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
2121
2122         oh = container_of(handle, struct osd_thandle, ot_super);
2123         LASSERT(oh->ot_handle->h_transaction != NULL);
2124         osd_trans_exec_op(env, handle, OSD_OT_WRITE);
2125
2126         /* Write small symlink to inode body as we need to maintain correct
2127          * on-disk symlinks for ldiskfs.
2128          * Note: buf->lb_buf contains a NUL terminator while buf->lb_len
2129          * does not include it.
2130          */
2131         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
2132         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
2133                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
2134         else
2135                 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
2136                                                   buf->lb_len, is_link, pos,
2137                                                   oh->ot_handle);
2138         if (result == 0)
2139                 result = buf->lb_len;
2140
2141         osd_trans_exec_check(env, handle, OSD_OT_WRITE);
2142
2143         return result;
2144 }
2145
2146 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
2147                              __u64 start, __u64 end, struct thandle *th)
2148 {
2149         struct osd_thandle *oh;
2150         struct inode       *inode;
2151         int                 rc;
2152         ENTRY;
2153
2154         LASSERT(th);
2155         oh = container_of(th, struct osd_thandle, ot_super);
2156
2157         /*
2158          * we don't need to reserve credits for the whole truncate;
2159          * that's not possible, as truncate may need to free too many
2160          * blocks to fit in a single transaction. instead we reserve
2161          * credits to change i_size and put the inode onto the orphan
2162          * list. if needed, truncate will extend or restart the
2163          * transaction
2164          */
2165         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
2166                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
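        /*
         * the extra 3 credits presumably cover the orphan-list
         * manipulation done by ldiskfs_orphan_add() in osd_punch()
         */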
2167
2168         inode = osd_dt_obj(dt)->oo_inode;
2169         LASSERT(inode);
2170
2171         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2172                                    i_projid_read(inode), 0, oh, osd_dt_obj(dt),
2173                                    NULL, OSD_QID_BLK);
2174
2175         if (rc == 0)
2176                 rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
2177
2178         RETURN(rc);
2179 }
2180
2181 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
2182                      __u64 start, __u64 end, struct thandle *th)
2183 {
2184         struct osd_object *obj = osd_dt_obj(dt);
2185         struct osd_device *osd = osd_obj2dev(obj);
2186         struct inode *inode = obj->oo_inode;
2187         struct osd_access_lock *al;
2188         struct osd_thandle *oh;
2189         int rc = 0, found = 0;
2190         bool grow = false;
2191         ENTRY;
2192
2193         LASSERT(end == OBD_OBJECT_EOF);
2194         LASSERT(dt_object_exists(dt));
2195         LASSERT(osd_invariant(obj));
2196         LASSERT(inode != NULL);
2197         ll_vfs_dq_init(inode);
2198
2199         LASSERT(th);
2200         oh = container_of(th, struct osd_thandle, ot_super);
2201         LASSERT(oh->ot_handle->h_transaction != NULL);
2202
2203         /* we used to skip truncates to the current size to
2204          * optimize truncates on the OST. with DoM we can
2205          * get an attr_set to set a specific size (MDS_REINT)
2206          * and then a truncate RPC which would essentially
2207          * be skipped. this is bad, so disable this
2208          * optimization on the MDS until clients stop
2209          * sending MDS_REINT (LU-11033) -bzzz */
2210         if (osd->od_is_ost && i_size_read(inode) == start)
2211                 RETURN(0);
2212
2213         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
2214
2215         spin_lock(&inode->i_lock);
2216         if (i_size_read(inode) < start)
2217                 grow = true;
2218         i_size_write(inode, start);
2219         spin_unlock(&inode->i_lock);
2220         ll_truncate_pagecache(inode, start);
2221
2222         /* optimize grow case */
2223         if (grow) {
2224                 osd_execute_truncate(obj);
2225                 GOTO(out, rc);
2226         }
2227
2228         /* add to the orphan list to ensure truncate completion
2229          * if this transaction succeeds. ldiskfs_truncate()
2230          * will take the inode out of the list */
2231         rc = ldiskfs_orphan_add(oh->ot_handle, inode);
2232         if (rc != 0)
2233                 GOTO(out, rc);
2234
2235         list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2236                 if (obj != al->tl_obj)
2237                         continue;
2238                 LASSERT(al->tl_shared == 0);
2239                 found = 1;
2240                 /* do actual truncate in osd_trans_stop() */
2241                 al->tl_truncate = 1;
2242                 break;
2243         }
2244         LASSERT(found);
2245
2246 out:
2247         RETURN(rc);
2248 }
2249
2250 static int fiemap_check_ranges(struct inode *inode,
2251                                u64 start, u64 len, u64 *new_len)
2252 {
2253         loff_t maxbytes;
2254
2255         *new_len = len;
2256
2257         if (len == 0)
2258                 return -EINVAL;
2259
2260         if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
2261                 maxbytes = inode->i_sb->s_maxbytes;
2262         else
2263                 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
2264
2265         if (start > maxbytes)
2266                 return -EFBIG;
2267
2268         /*
2269          * Shrink request scope to what the fs can actually handle.
2270          */
2271         if (len > maxbytes || (maxbytes - len) < start)
2272                 *new_len = maxbytes - start;
2273
2274         return 0;
2275 }
2276
2277 /* So that the fiemap access checks can't overflow on 32-bit machines. */
2278 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
2279
2280 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
2281                           struct fiemap *fm)
2282 {
2283         struct fiemap_extent_info fieinfo = {0, };
2284         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2285         u64 len;
2286         int rc;
2287 
2289         LASSERT(inode);
2290         if (inode->i_op->fiemap == NULL)
2291                 return -EOPNOTSUPP;
2292
2293         if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
2294                 return -EINVAL;
2295
2296         rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
2297         if (rc)
2298                 return rc;
2299
2300         fieinfo.fi_flags = fm->fm_flags;
2301         fieinfo.fi_extents_max = fm->fm_extent_count;
2302         fieinfo.fi_extents_start = fm->fm_extents;
2303
2304         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
2305                 filemap_write_and_wait(inode->i_mapping);
2306
2307         rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
2308         fm->fm_flags = fieinfo.fi_flags;
2309         fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
2310
2311         return rc;
2312 }
2313
2314 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
2315                        __u64 start, __u64 end, enum lu_ladvise_type advice)
2316 {
2317         int              rc = 0;
2318         struct inode    *inode = osd_dt_obj(dt)->oo_inode;
2319         ENTRY;
2320
2321         switch (advice) {
2322         case LU_LADVISE_DONTNEED:
2323                 if (end == 0)
2324                         break;
2325                 invalidate_mapping_pages(inode->i_mapping,
2326                                          start >> PAGE_SHIFT,
2327                                          (end - 1) >> PAGE_SHIFT);
2328                 break;
2329         default:
2330                 rc = -ENOTSUPP;
2331                 break;
2332         }
2333
2334         RETURN(rc);
2335 }
2336
2337 /*
2338  * in some cases we may need declare methods for objects being created,
2339  * e.g. when we create a symlink
2340  */
2341 const struct dt_body_operations osd_body_ops_new = {
2342         .dbo_declare_write = osd_declare_write,
2343 };
2344
2345 const struct dt_body_operations osd_body_ops = {
2346         .dbo_read                       = osd_read,
2347         .dbo_declare_write              = osd_declare_write,
2348         .dbo_write                      = osd_write,
2349         .dbo_bufs_get                   = osd_bufs_get,
2350         .dbo_bufs_put                   = osd_bufs_put,
2351         .dbo_write_prep                 = osd_write_prep,
2352         .dbo_declare_write_commit       = osd_declare_write_commit,
2353         .dbo_write_commit               = osd_write_commit,
2354         .dbo_read_prep                  = osd_read_prep,
2355         .dbo_declare_punch              = osd_declare_punch,
2356         .dbo_punch                      = osd_punch,
2357         .dbo_fiemap_get                 = osd_fiemap_get,
2358         .dbo_ladvise                    = osd_ladvise,
2359 };
2360
2361 /**
2362  * Get a truncate lock
2363  *
2364  * In order to take a multi-transaction truncate out of the main transaction
2365  * we let the caller grab a lock on the object passed. The lock can be shared
2366  * (for writes) or exclusive (for truncate). It's not allowed to mix truncate
2367  * and write in the same transaction handle (not to be confused with a big
2368  * ldiskfs transaction containing lots of handles).
2369  * The lock must be taken at declaration.
2370  *
2371  * \param obj           object to lock
2372  * \param oh            transaction
2373  * \param shared        shared or exclusive
2374  *
2375  * \retval 0            lock is granted
2376  * \retval -ENOMEM      no memory to allocate the lock
2377  */
2378 int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
2379 {
2380         struct osd_access_lock *al, *tmp;
2381
2382         LASSERT(obj);
2383         LASSERT(oh);
2384
2385         list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
2386                 if (tmp->tl_obj != obj)
2387                         continue;
2388                 LASSERT(tmp->tl_shared == shared);
2389                 /* found same lock */
2390                 return 0;
2391         }
2392
2393         OBD_ALLOC_PTR(al);
2394         if (unlikely(al == NULL))
2395                 return -ENOMEM;
2396         al->tl_obj = obj;
2397         al->tl_truncate = false;
2398         if (shared)
2399                 down_read(&obj->oo_ext_idx_sem);
2400         else
2401                 down_write(&obj->oo_ext_idx_sem);
2402         al->tl_shared = shared;
2403
2404         list_add(&al->tl_list, &oh->ot_trunc_locks);
2405
2406         return 0;
2407 }
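/*
 * A typical sequence, as used by the callers in this file:
 * osd_declare_punch() takes the lock exclusively (shared = false) while
 * the write declarations take it shared; osd_punch() then marks
 * al->tl_truncate, and (per the comment there) the actual truncate runs
 * at osd_trans_stop() time via osd_process_truncates() before
 * osd_trunc_unlock_all() drops the locks.
 */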
2408
2409 void osd_trunc_unlock_all(struct list_head *list)
2410 {
2411         struct osd_access_lock *al, *tmp;
2412         list_for_each_entry_safe(al, tmp, list, tl_list) {
2413                 if (al->tl_shared)
2414                         up_read(&al->tl_obj->oo_ext_idx_sem);
2415                 else
2416                         up_write(&al->tl_obj->oo_ext_idx_sem);
2417                 list_del(&al->tl_list);
2418                 OBD_FREE_PTR(al);
2419         }
2420 }
2421
2422 void osd_execute_truncate(struct osd_object *obj)
2423 {
2424         struct osd_device *d = osd_obj2dev(obj);
2425         struct inode *inode = obj->oo_inode;
2426         __u64 size;
2427
2428         /* simulate a crash before (or in the middle of) a delayed truncate */
2429         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
2430                 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2431                 struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);
2432
2433                 mutex_lock(&sbi->s_orphan_lock);
2434                 list_del_init(&ei->i_orphan);
2435                 mutex_unlock(&sbi->s_orphan_lock);
2436                 return;
2437         }
2438
2439 #ifdef HAVE_INODEOPS_TRUNCATE
2440         if (inode->i_op->truncate)
2441                 inode->i_op->truncate(inode);
2442         else
2443 #endif
2444                 ldiskfs_truncate(inode);
2445
2446         /*
2447          * For a partial-page truncate, flush the page to disk immediately to
2448          * avoid data corruption during direct disk write.  b=17397
2449          */
2450         size = i_size_read(inode);
2451         if ((size & ~PAGE_MASK) == 0)
2452                 return;
2453         if (osd_use_page_cache(d)) {
2454                 filemap_fdatawrite_range(inode->i_mapping, size, size + 1);
2455         } else {
2456                 /* note we use the "wait" version to ensure I/O is complete */
2457                 filemap_write_and_wait_range(inode->i_mapping, size, size + 1);
2458                 invalidate_mapping_pages(inode->i_mapping, size >> PAGE_SHIFT,
2459                                          size >> PAGE_SHIFT);
2460         }
2461 }
2462
2463 void osd_process_truncates(struct list_head *list)
2464 {
2465         struct osd_access_lock *al;
2466
2467         LASSERT(journal_current_handle() == NULL);
2468
2469         list_for_each_entry(al, list, tl_list) {
2470                 if (al->tl_shared)
2471                         continue;
2472                 if (!al->tl_truncate)
2473                         continue;
2474                 osd_execute_truncate(al->tl_obj);
2475         }
2476 }