/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd/osd_io.c
 *
 * body operations
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 *
 */

/* LUSTRE_VERSION_CODE */
#include <lustre_ver.h>
/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 * OBD_FAIL_CHECK
 */
#include <obd_support.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>

static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        iobuf->dr_dev = d;
        iobuf->dr_frags = 0;
        iobuf->dr_elapsed = 0;
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
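
/*
 * Sizing sketch (a worked example, assuming 4KB pages, 4KB blocks and
 * PTLRPC_MAX_BRW_PAGES >= 512): the pre-allocated buffers hold 256 pages
 * (1MB).  For a 300-page request the loop above doubles i from 256 to 512,
 * so dr_pages grows to 512 entries and dr_blocks to 512 block numbers,
 * avoiding a reallocation on every subsequent call of similar size.
 */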

static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;
}

void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.
                                 hist[BRW_R_DIO_FRAGS+rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
                                      iobuf->dr_elapsed);
        }
}

#ifndef REQ_WRITE /* pre-2.6.35 */
#define __REQ_WRITE BIO_RW
#endif

static void dio_complete_routine(struct bio *bio, int error)
{
        struct osd_iobuf *iobuf = bio->bi_private;
#ifdef HAVE_BVEC_ITER
        struct bvec_iter iter;
        struct bio_vec bvl;
#else
        int iter;
        struct bio_vec *bvl;
#endif

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to <https://jira.hpdd.intel.com/> , along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps).  Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
                       "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
                       "bi_private: %p\n", bio->bi_next, bio->bi_flags,
                        bio->bi_rw, bio->bi_vcnt, bio_idx(bio),
                        bio_sectors(bio) << 9, bio->bi_end_io,
                        atomic_read(&bio->bi_cnt), bio->bi_private);
                return;
        }

        /* the check is outside of the loop for performance reasons -bzzz */
        if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
                bio_for_each_segment(bvl, bio, iter) {
                        if (likely(error == 0))
                                SetPageUptodate(bvec_iter_page(&bvl, iter));
                        LASSERT(PageLocked(bvec_iter_page(&bvl, iter)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * set dr_elapsed before dr_numreqs turns to 0, otherwise
         * it's possible that the service thread will see dr_numreqs
         * is zero, but dr_elapsed is not set yet, leading to lost
         * data in this processing and an assertion in a subsequent
         * call to OSD.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
}

static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device    *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
}

static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}
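
/*
 * Merge sketch (a worked example): a fragment can only be appended to the
 * current bio if it starts exactly where the bio ends.  E.g. a bio covering
 * sectors 2040..2047 can absorb a fragment starting at sector 2048, while a
 * fragment at sector 2056 forces submission of that bio and allocation of a
 * new one in osd_do_bio() below.
 */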

static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int            blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        struct page  **pages = iobuf->dr_pages;
        int            npages = iobuf->dr_npages;
        unsigned long *blocks = iobuf->dr_blocks;
        int            total_blocks = npages * blocks_per_page;
        int            sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int   blocksize = inode->i_sb->s_blocksize;
        struct bio    *bio = NULL;
        struct page   *page;
        unsigned int   page_offset;
        sector_t       sector;
        int            nblocks;
        int            block_idx;
        int            page_idx;
        int            i;
        int            rc = 0;
        ENTRY;

        LASSERT(iobuf->dr_npages == npages);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = cfs_time_current();

        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q =
                                        bdev_get_queue(bio->bi_bdev);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       0, queue_max_hw_segments(q));
                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio_set_sector(bio, sector);
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }

out:
        /* to achieve better IO throughput we don't wait for write completion
         * here; instead we proceed with the transaction commit in parallel
         * and wait for IO completion once the transaction is stopped.
         * see osd_trans_stop() for more details -bzzz */
        if (iobuf->dr_rw == 0) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}

static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb)
{
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_CACHE_SIZE - 1);
                int plen = PAGE_CACHE_SIZE - poff;

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long) len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(0);
}
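
/*
 * Mapping sketch (a worked example, assuming 4KB pages): a remote niobuf
 * with offset=1000, len=9000 is split into three local niobufs:
 *   lnb[0]: file_offset 1000, page_offset 1000, len 3096
 *   lnb[1]: file_offset 4096, page_offset    0, len 4096
 *   lnb[2]: file_offset 8192, page_offset    0, len 1808
 * so *nrpages comes back as 3, one entry per page touched.
 */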

struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw)
{
        struct inode      *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page       *page;

        LASSERT(inode);

        page = find_or_create_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
                                   GFP_NOFS | __GFP_HIGHMEM);
        if (unlikely(page == NULL))
                lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);

        return page;
}

/*
 * there are the following "locks":
 *
 * journal_start
 * i_mutex
 * page lock
 *
 * osd write path:
 *    - lock page(s)
 *    - journal_start
 *    - truncate_sem
 *
 * ext4 vmtruncate:
 *    - lock pages, unlock
 *    - journal_start
 *    - lock partial page
 *    - i_data_sem
 */
int osd_bufs_get(const struct lu_env *env, struct dt_object *d, loff_t pos,
                 ssize_t len, struct niobuf_local *lnb, int rw,
                 struct lustre_capa *capa)
{
        struct osd_object   *obj    = osd_dt_obj(d);
        int npages, i, rc = 0;

        LASSERT(obj->oo_inode);

        osd_map_remote_to_local(pos, len, &npages, lnb);

        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(d, lnb->lnb_file_offset, rw);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                /* DLM locking protects us from write and truncate competing
                 * for the same region, but truncate can leave a dirty page
                 * in the cache. it's possible that writeout on such a page
                 * is in progress when we access it. it's also possible that
                 * during this writeout we put new (partial) data, but then
                 * won't be able to proceed in filter_commitrw_write(). thus
                 * let's just wait for writeout completion; it should be rare
                 * enough. -bzzz */
                wait_on_page_writeback(lnb->lnb_page);
                BUG_ON(PageWriteback(lnb->lnb_page));

                lu_object_get(&d->do_lu);
        }
        rc = i;

cleanup:
        RETURN(rc);
}

static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        int i;

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_page == NULL)
                        continue;
                LASSERT(PageLocked(lnb[i].lnb_page));
                unlock_page(lnb[i].lnb_page);
                page_cache_release(lnb[i].lnb_page);
                lu_object_put(env, &dt->do_lu);
                lnb[i].lnb_page = NULL;
        }

        RETURN(0);
}

#ifndef HAVE_LDISKFS_MAP_BLOCKS

#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif

struct bpointers {
        unsigned long *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

static long ldiskfs_ext_find_goal(struct inode *inode,
                                  struct ldiskfs_ext_path *path,
                                  unsigned long block, int *aflags)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ldiskfs_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ldiskfs_ext_pblock(ex) +
                                (block - le32_to_cpu(ex->ee_block));

                /* it looks like the index is empty;
                 * try to find a start from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
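
/*
 * Goal sketch (a worked example, assuming 32768 blocks per group): the
 * colour term spreads the allocations of different threads across the
 * group; a thread with pid % 16 == 5 gets colour 5 * (32768 / 16) = 10240,
 * so its goal is group start + 10240 + the logical block number.
 */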

static unsigned long new_blocks(handle_t *handle, struct inode *inode,
                                struct ldiskfs_ext_path *path,
                                unsigned long block, unsigned long *count,
                                int *err)
{
        struct ldiskfs_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbouring allocated blocks */
        ar.lleft = block;
        *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = LDISKFS_MB_HINT_DATA;
        pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}

static int ldiskfs_ext_new_extent_cb(struct inode *inode,
                                     struct ldiskfs_ext_path *path,
                                     struct ldiskfs_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                     struct ldiskfs_extent *ex,
#endif
                                     void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct ldiskfs_extent nex;
        unsigned long pblock;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
        if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
#endif
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = LDISKFS_I(inode)->i_ext_generation;
        count = ldiskfs_ext_calc_credits_for_insert(inode, path);

        handle = osd_journal_start(inode, LDISKFS_HT_MISC,
                                   count + LDISKFS_ALLOC_NEEDED + 1);
        if (IS_ERR(handle)) {
                return PTR_ERR(handle);
        }

        if (tgen != LDISKFS_I(inode)->i_ext_generation) {
                /* the tree has changed, so the path can be invalid now */
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback is not
         * protected by i_data_sem as a whole, so we patch it to store the
         * generation in the path and verify here that the tree hasn't
         * changed */
        down_write((&LDISKFS_I(inode)->i_data_sem));

        /* validate extent, make sure the extent tree has not changed */
        if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&LDISKFS_I(inode)->i_data_sem);
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        BUG_ON(count > cex->ec_len);

        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ldiskfs_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
        if (err) {
                /* free the data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it every free() */
                ldiskfs_discard_preallocations(inode);
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ldiskfs_free_blocks(handle, inode, NULL,
                                    ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#else
                ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * By returning the length of the extent we actually inserted,
         * we ask ldiskfs_ext_walk_space() to continue
         * scanning after that block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ldiskfs_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&LDISKFS_I(inode)->i_data_sem));
        ldiskfs_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        CERROR("current extent: %u/%u/%llu %d\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start,
                                cex->ec_type);
#else
                        CERROR("current extent: %u/%u/%llu\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start);
#endif
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
                        if (cex->ec_type != LDISKFS_EXT_CACHE_EXTENT) {
#else
                        if ((cex->ec_len == 0) || (cex->ec_start == 0)) {
#endif
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
                                unmap_underlying_metadata(inode->i_sb->s_bdev,
                                                          *(bp->blocks));
                        }
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long block,
                            unsigned long num, unsigned long *blocks,
                            int create)
{
        struct bpointers bp;
        int err;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               block, block + num - 1, (unsigned) inode->i_ino);

        bp.blocks = blocks;
        bp.start = block;
        bp.init_num = bp.num = num;
        bp.create = create;

        err = ldiskfs_ext_walk_space(inode, block, num,
                                         ldiskfs_ext_new_extent_cb, &bp);
        ldiskfs_ext_invalidate_cache(inode);

        return err;
}

int osd_ldiskfs_map_bm_inode_pages(struct inode *inode, struct page **page,
                                   int pages, unsigned long *blocks,
                                   int create)
{
        int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        unsigned long *b;
        int rc = 0, i;

        for (i = 0, b = blocks; i < pages; i++, page++) {
                rc = ldiskfs_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %lu create %d: rc %d\n",
                               inode->i_ino, *b, create, rc);
                        break;
                }
                b += blocks_per_page;
        }
        return rc;
}

int osd_ldiskfs_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int create)
{
        int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* the pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
                                             clen * blocks_per_page, blocks,
                                             create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }

        if (fp)
                rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
                                             clen * blocks_per_page, blocks,
                                             create);
cleanup:
        return rc;
}
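
/*
 * Grouping sketch (a worked example, assuming 4KB pages and 4KB blocks, so
 * blocks_per_page == 1): for pages with indices 10, 11, 12, 20 the loop
 * above issues osd_ldiskfs_map_nblocks(inode, 10, 3, ...) for the run
 * 10..12, and the trailing "if (fp)" call handles the lone page 20 after
 * the loop exits.
 */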

static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, unsigned long *blocks,
                                       int create)
{
        int rc;

        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);

        return rc;
}
#else
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, unsigned long *blocks,
                                       int create)
{
        int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* the pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                long blen, total = 0;
                handle_t *handle = NULL;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
                if (create) {
                        create = LDISKFS_GET_BLOCKS_CREATE;
                        handle = ldiskfs_journal_current_handle();
                        LASSERT(handle != NULL);
                }
cont_map:
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;
                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                } else {
                                        *(blocks + total) = map.m_pblk + c;
                                        /* unmap any possible underlying
                                         * metadata from the block device
                                         * mapping.  bug 6998. */
                                        if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                            create)
                                                unmap_underlying_metadata(
                                                        inode->i_sb->s_bdev,
                                                        map.m_pblk + c);
                                }
                        }
                        rc = 0;
                }
                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        return rc;
}
#endif /* HAVE_LDISKFS_MAP_BLOCKS */

static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti   = osd_oti_get(env);
        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
        struct inode           *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
        struct timeval          start;
        struct timeval          end;
        unsigned long           timediff;
        ssize_t                 isize;
        __s64                   maxidx;
        int                     rc = 0;
        int                     i;
        int                     cache = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        do_gettimeofday(&start);
        for (i = 0; i < npages; i++) {

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);

                /*
                 * till commit the content of the page is undefined;
                 * we'll set it uptodate once bulk is done, otherwise
                 * subsequent reads could access unstable data
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_CACHE_SIZE)
                        continue;

                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~CFS_PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_CACHE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        do_gettimeofday(&end);
        timediff = cfs_timeval_sub(&end, &start, NULL);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}
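
/*
 * Prep sketch (a worked example, assuming 4KB pages): a partial write of
 * 1000 bytes at offset 0 of a page that lies within the current file size
 * queues that page for a pre-read above, so the untouched 3096 bytes keep
 * their on-disk content; the same partial write beyond EOF only zero-fills
 * the bytes around the written range and needs no read at all.
 */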

/* Check if a block is allocated or not */
static int osd_is_mapped(struct inode *inode, obd_size offset)
{
        sector_t (*fs_bmap)(struct address_space *, sector_t);

        fs_bmap = inode->i_mapping->a_ops->bmap;

        /* We can't know if we are overwriting or not */
        if (unlikely(fs_bmap == NULL))
                return 0;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) <
            (offset >> inode->i_blkbits))
                return 0;

        if (fs_bmap(inode->i_mapping, offset >> inode->i_blkbits) == 0)
                return 0;

        return 1;
}
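
/*
 * Mapping-check sketch (a worked example, assuming 4KB blocks): for a file
 * of size 8192, offset 12288 is block 3 while the last block is 1, so the
 * EOF test returns 0 without calling ->bmap; for offset 4096 in a sparse
 * region, ->bmap returns 0 (a hole) and the function also returns 0.
 */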

static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode            *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle      *oh;
        int                      extents = 1;
        int                      depth;
        int                      i;
        int                      newblocks;
        int                      rc = 0;
        int                      flags = 0;
        bool                     ignore_quota = false;
        long long                quota_space = 0;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        newblocks = npages;

        /* calculate the number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                if (i && lnb[i].lnb_file_offset !=
                    lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
                        extents++;

                if (!osd_is_mapped(inode, lnb[i].lnb_file_offset))
                        quota_space += PAGE_CACHE_SIZE;

                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the checking
                 * for whether page is from cache can be simplified as:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        ignore_quota = true;
        }

        /*
         * each extent can go into a new leaf causing a split
         * 5 is max tree depth: inode + 4 index blocks
         * with blockmaps, depth is 3 at most
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * many concurrent threads may grow the tree by the time
                 * our transaction starts, so consider 2 the minimum depth
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                oh->ot_credits++; /* inode */
                oh->ot_credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                oh->ot_credits++; /* inode */
                oh->ot_credits += depth * extents;
        }

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);

        /* each new block can go in different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                oh->ot_credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                oh->ot_credits += newblocks;

        /* we can't dirty more gd blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                oh->ot_credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                oh->ot_credits += newblocks;

        /* make sure the over quota flags were not set */
        lnb[0].lnb_flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA);

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   quota_space, oh, true, true, &flags,
                                   ignore_quota);

        /* we only need to store the overquota flags in the first lnb for
         * now; once we support multiple-object BRW, this code needs to be
         * revised. */
        if (flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;

        RETURN(rc);
}
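
/*
 * Credit sketch (a worked example): for 8 contiguous pages forming one
 * extent on an extent-based file with ext_depth() == 1, depth becomes
 * max(1, 1) + 1 = 2 and newblocks = 8 + 2 = 10, so the code above adds
 * 1 (inode) + 2 * 2 * 1 = 4 credits, plus up to 10 bitmap and 10 group
 * descriptor blocks, bounded by s_groups_count and s_gdb_count.
 */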

static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *thandle)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
        loff_t isize;
        int rc = 0, i;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 1, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        ll_vfs_dq_init(inode);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_rc == -ENOSPC &&
                    osd_is_mapped(inode, lnb[i].lnb_file_offset)) {
                        /* Allow the write to proceed if overwriting an
                         * existing block */
                        lnb[i].lnb_rc = 0;
                }

                if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
                               lnb[i].lnb_rc);
                        LASSERT(lnb[i].lnb_page);
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                        continue;
                }

                LASSERT(PageLocked(lnb[i].lnb_page));
                LASSERT(!PageWriteback(lnb[i].lnb_page));

                if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
                        isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;

                /*
                 * Since write and truncate are serialized by oo_sem, even
                 * partial-page truncate should not leave dirty pages in the
                 * page cache.
                 */
                LASSERT(!PageDirty(lnb[i].lnb_page));

                SetPageUptodate(lnb[i].lnb_page);

                osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
        }

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 1);
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
        }

        if (likely(rc == 0)) {
                if (isize > i_size_read(inode)) {
                        i_size_write(inode, isize);
                        LDISKFS_I(inode)->i_disksize = isize;
                        ll_dirty_inode(inode, I_DIRTY_DATASYNC);
                }

                rc = osd_do_bio(osd, inode, iobuf);
                /* we don't do stats here as in the read path because
                 * write is async: we'll do this in osd_bufs_put() */
        } else {
                osd_fini_iobuf(osd, iobuf);
        }

        if (unlikely(rc != 0)) {
                /* if write fails, we should drop pages from the cache */
                for (i = 0; i < npages; i++) {
                        if (lnb[i].lnb_page == NULL)
                                continue;
                        LASSERT(PageLocked(lnb[i].lnb_page));
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                }
        }

        RETURN(rc);
}

static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                         struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct timeval start, end;
        unsigned long timediff;
        int rc = 0, i, m = 0, cache = 0, cache_hits = 0, cache_misses = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        if (osd->od_read_cache)
                cache = 1;
        if (i_size_read(inode) > osd->od_readcache_max_filesize)
                cache = 0;

        do_gettimeofday(&start);
        for (i = 0; i < npages; i++) {

                if (i_size_read(inode) <= lnb[i].lnb_file_offset)
                        /* If there's no more data, abort early.
                         * lnb->lnb_rc == 0, so it's easy to detect later. */
                        break;

                if (i_size_read(inode) <
                    lnb[i].lnb_file_offset + lnb[i].lnb_len - 1)
                        lnb[i].lnb_rc = i_size_read(inode) -
                                lnb[i].lnb_file_offset;
                else
                        lnb[i].lnb_rc = lnb[i].lnb_len;
                m += lnb[i].lnb_len;

                if (PageUptodate(lnb[i].lnb_page)) {
                        cache_hits++;
                } else {
                        cache_misses++;
                        osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
                }

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
        }
        do_gettimeofday(&end);
        timediff = cfs_timeval_sub(&end, &start, NULL);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (cache_hits != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
                                    cache_hits);
        if (cache_misses != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
                                    cache_misses);
        if (cache_hits + cache_misses != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
                                    cache_hits + cache_misses);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                rc = osd_do_bio(osd, inode, iobuf);

                /* IO stats will be done in osd_bufs_put() */
        }

        RETURN(rc);
}

/*
 * XXX: Another layering violation for now.
 *
 * We don't want to use ->f_op->read methods, because generic file write
 *
 *         - serializes on ->i_sem, and
 *
 *         - does a lot of extra work like balance_dirty_pages(),
 *
 * which doesn't work for globally shared files like /last_rcvd.
 */
static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);

        memcpy(buffer, (char *)ei->i_data, buflen);

        return buflen;
}
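
/*
 * Readlink sketch (based on ext4-style fast symlinks): short symlink
 * targets are stored directly in the inode's i_data array rather than in a
 * data block, which is why a plain memcpy() from ei->i_data is sufficient
 * here; the caller (osd_read() below) only takes this path when the buffer
 * length is smaller than sizeof(i_data).
 */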

int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
        struct buffer_head *bh;
        unsigned long block;
        int osize;
        int blocksize;
        int csize;
        int boffs;
        int err;

        /* prevent reading after eof */
        spin_lock(&inode->i_lock);
        if (i_size_read(inode) < *offs + size) {
                loff_t diff = i_size_read(inode) - *offs;
                spin_unlock(&inode->i_lock);
                if (diff < 0) {
                        CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
                               i_size_read(inode), *offs);
                        return -EBADR;
                } else if (diff == 0) {
                        return 0;
                } else {
                        size = diff;
                }
        } else {
                spin_unlock(&inode->i_lock);
        }

        blocksize = 1 << inode->i_blkbits;
        osize = size;
        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ldiskfs_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("%s: can't read %u@%llu on ino %lu: rc = %d\n",
                               LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
                               csize, *offs, inode->i_ino, err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return osize;
}
1374
1375 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1376                         struct lu_buf *buf, loff_t *pos,
1377                         struct lustre_capa *capa)
1378 {
1379         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1380         int           rc;
1381
1382         if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
1383                 return -EACCES;
1384
1385         /* Read small symlink from inode body as we need to maintain correct
1386          * on-disk symlinks for ldiskfs.
1387          */
1388         if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1389             (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1390                 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1391         else
1392                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1393
1394         return rc;
1395 }
1396
1397 static inline int osd_extents_enabled(struct super_block *sb,
1398                                       struct inode *inode)
1399 {
1400         if (inode != NULL) {
1401                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1402                         return 1;
1403         } else if (test_opt(sb, EXTENTS)) {
1404                 return 1;
1405         }
1406         return 0;
1407 }
1408
1409 static inline int osd_calc_bkmap_credits(struct super_block *sb,
1410                                          struct inode *inode,
1411                                          const loff_t size,
1412                                          const loff_t pos,
1413                                          const int blocks)
1414 {
1415         int credits, bits, bs, i;
1416
1417         bits = sb->s_blocksize_bits;
1418         bs = 1 << bits;
1419
1420         /* legacy blockmap: 3 levels * 3 (bitmap, gd, the block itself);
1421          * we do not expect blockmaps on large files, so shrink the
1422          * estimate to 2 levels (enough for files up to 4GB) */
1423
1424         /* this is default reservation: 2 levels */
1425         credits = (blocks + 2) * 3;
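             /* e.g. a 3-block write reserves (3 + 2) * 3 = 15 credits here */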
1426
1427         /* actual offset is unknown, hard to optimize */
1428         if (pos == -1)
1429                 return credits;
1430
1431         /* now check a few specific cases to optimize */
1432         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1433                 /* no indirects */
1434                 credits = blocks;
1435                 /* allocate if not allocated */
1436                 if (inode == NULL) {
1437                         credits += blocks * 2;
1438                         return credits;
1439                 }
1440                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1441                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1442                         if (LDISKFS_I(inode)->i_data[i] == 0)
1443                                 credits += 2;
1444                 }
1445         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1446                 /* single indirect */
1447                 credits = blocks * 3;
1448                 /* the indirect block itself may still need allocation */
1449                 if (!inode || LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1450                         credits += 3;
1451         }
1452
1453         return credits;
1454 }
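     /*
      * Example (assuming 4KB blocks): an 8KB write at pos = 0 into an
      * existing file whose first two direct blocks are not yet allocated
      * hits the "no indirects" case above: credits = 2 blocks +
      * 2 * 2 (bitmap + group descriptor per new block) = 6.
      */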
1455
1456 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1457                                  const struct lu_buf *buf, loff_t _pos,
1458                                  struct thandle *handle)
1459 {
1460         struct osd_object  *obj  = osd_dt_obj(dt);
1461         struct inode       *inode = obj->oo_inode;
1462         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1463         struct osd_thandle *oh;
1464         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1465         int                 bits, bs;
1466         int                 depth, size;
1467         loff_t              pos;
1468         ENTRY;
1469
1470         LASSERT(buf != NULL);
1471         LASSERT(handle != NULL);
1472
1473         oh = container_of0(handle, struct osd_thandle, ot_super);
1474         LASSERT(oh->ot_handle == NULL);
1475
1476         size = buf->lb_len;
1477         bits = sb->s_blocksize_bits;
1478         bs = 1 << bits;
1479
1480         if (_pos == -1) {
1481                 /* if this is an append, then we
1482                  * should expect cross-block record */
1483                 pos = 0;
1484         } else {
1485                 pos = _pos;
1486         }
1487
1488         /* blocks to modify */
1489         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1490         LASSERT(blocks > 0);
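             /* e.g. with 4KB blocks, a 6000-byte write at pos = 1000
              * covers blocks 0..1, so blocks = 2 */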
1491
1492         if (inode != NULL && _pos != -1) {
1493                 /* object size in blocks */
1494                 est = (i_size_read(inode) + bs - 1) >> bits;
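                     /* i_blocks counts 512-byte sectors; shift by
                      * (bits - 9) to convert to filesystem blocks */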
1495                 allocated = inode->i_blocks >> (bits - 9);
1496                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1497                         /* looks like an overwrite, no need to modify tree */
1498                         credits = blocks;
1499                         /* no need to modify i_size */
1500                         goto out;
1501                 }
1502         }
1503
1504         if (osd_extents_enabled(sb, inode)) {
1505                 /*
1506                  * Many concurrent threads may grow the tree by the time
1507                  * our transaction starts, so take 2 as the minimum depth.
1508                  * For every level we may need to allocate a new block
1509                  * and take some entries from the old one: 3 blocks to
1510                  * allocate (bitmap, gd, the block itself) plus the old
1511                  * block to modify - 4 per level.
1512                  */
1513                 depth = inode != NULL ? ext_depth(inode) : 0;
1514                 depth = max(depth, 1) + 1;
1515                 credits = depth;
1516                 /* if not append, then a split may need to modify
1517                  * existing blocks, moving entries into the new ones */
1518                 if (_pos != -1)
1519                         credits += depth;
1520                 /* blocks to store data: bitmap,gd,itself */
1521                 credits += blocks * 3;
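                     /* e.g. appending 4 blocks to a depth-1 tree:
                      * credits = 2 + 4 * 3 = 14 */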
1522         } else {
1523                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1524         }
1525         /* if inode is created as part of the transaction,
1526          * then it's counted already by the creation method */
1527         if (inode != NULL)
1528                 credits++;
1529
1530 out:
1531
1532         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1533
1534         /* dt_declare_write() is usually called for system objects, such
1535          * as llog or last_rcvd files. We needn't enforce quota on those
1536          * objects, so always set the lqi_space as 0. */
1537         if (inode != NULL)
1538                 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1539                                            i_gid_read(inode), 0, oh, true,
1540                                            true, NULL, false);
1541         RETURN(rc);
1542 }
1543
1544 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1545 {
1546         /* LU-2634: clear the extent format for fast symlink */
1547         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1548
1549         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1550         LDISKFS_I(inode)->i_disksize = buflen;
1551         i_size_write(inode, buflen);
1552         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1553
1554         return 0;
1555 }
1556
1557 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
1558                              int write_NUL, loff_t *offs, handle_t *handle)
1559 {
1560         struct buffer_head *bh        = NULL;
1561         loff_t              offset    = *offs;
1562         loff_t              new_size  = i_size_read(inode);
1563         unsigned long       block;
1564         int                 blocksize = 1 << inode->i_blkbits;
1565         int                 err = 0;
1566         int                 size;
1567         int                 boffs;
1568         int                 dirty_inode = 0;
1569
1570         if (write_NUL) {
1571                 /*
1572                  * a long symlink write does not count the NUL in bufsize;
1573                  * we write it anyway (so the caller's buffer must have
1574                  * room for it), and i_size does not count it either.
1575                  */
1576                 ((char *)buf)[bufsize] = '\0';
1577                 ++bufsize;
1578         }
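             /* e.g. a symlink target "abc" arrives with bufsize = 3 and
              * is stored as 4 bytes on disk, while i_size remains 3 */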
1579         while (bufsize > 0) {
1580                 if (bh != NULL)
1581                         brelse(bh);
1582
1583                 block = offset >> inode->i_blkbits;
1584                 boffs = offset & (blocksize - 1);
1585                 size = min(blocksize - boffs, bufsize);
1586                 bh = ldiskfs_bread(handle, inode, block, 1, &err);
1587                 if (!bh) {
1588                         CERROR("%s: error reading offset %llu (block %lu): "
1589                                "rc = %d\n",
1590                                inode->i_sb->s_id, offset, block, err);
1591                         break;
1592                 }
1593
1594                 err = ldiskfs_journal_get_write_access(handle, bh);
1595                 if (err) {
1596                         CERROR("journal_get_write_access() returned error %d\n",
1597                                err);
1598                         break;
1599                 }
1600                 LASSERTF(boffs + size <= bh->b_size,
1601                          "boffs %d size %d bh->b_size %lu\n",
1602                          boffs, size, (unsigned long)bh->b_size);
1603                 memcpy(bh->b_data + boffs, buf, size);
1604                 err = ldiskfs_journal_dirty_metadata(handle, bh);
1605                 if (err)
1606                         break;
1607
1608                 if (offset + size > new_size)
1609                         new_size = offset + size;
1610                 offset += size;
1611                 bufsize -= size;
1612                 buf += size;
1613         }
1614         if (bh)
1615                 brelse(bh);
1616
1617         if (write_NUL)
1618                 --new_size;
1619         /* correct in-core and on-disk sizes */
1620         if (new_size > i_size_read(inode)) {
1621                 spin_lock(&inode->i_lock);
1622                 if (new_size > i_size_read(inode))
1623                         i_size_write(inode, new_size);
1624                 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
1625                         LDISKFS_I(inode)->i_disksize = i_size_read(inode);
1626                         dirty_inode = 1;
1627                 }
1628                 spin_unlock(&inode->i_lock);
1629                 if (dirty_inode)
1630                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1631         }
1632
1633         if (err == 0)
1634                 *offs = offset;
1635         return err;
1636 }
1637
1638 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1639                          const struct lu_buf *buf, loff_t *pos,
1640                          struct thandle *handle, struct lustre_capa *capa,
1641                          int ignore_quota)
1642 {
1643         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1644         struct osd_thandle      *oh;
1645         ssize_t                 result;
1646         int                     is_link;
1647
1648         LASSERT(dt_object_exists(dt));
1649
1650         if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_WRITE))
1651                 return -EACCES;
1652
1653         LASSERT(handle != NULL);
1654         LASSERT(inode != NULL);
1655         ll_vfs_dq_init(inode);
1656
1657         /* XXX: don't check: one declared chunk can be used many times */
1658         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
1659
1660         oh = container_of(handle, struct osd_thandle, ot_super);
1661         LASSERT(oh->ot_handle->h_transaction != NULL);
1662         /* Write small symlink to inode body as we need to maintain correct
1663          * on-disk symlinks for ldiskfs.
1664          * Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
1665          * does not include it.
1666          */
1667         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1668         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1669                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1670         else
1671                 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
1672                                                   buf->lb_len, is_link, pos,
1673                                                   oh->ot_handle);
1674         if (result == 0)
1675                 result = buf->lb_len;
1676         return result;
1677 }
1678
1679 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1680                              __u64 start, __u64 end, struct thandle *th)
1681 {
1682         struct osd_thandle *oh;
1683         struct inode       *inode;
1684         int                 rc;
1685         ENTRY;
1686
1687         LASSERT(th);
1688         oh = container_of(th, struct osd_thandle, ot_super);
1689
1690         /*
1691          * We can't reserve credits for the whole truncate: it may
1692          * need to free too many blocks, and that won't fit in a
1693          * single transaction. Instead we reserve credits to change
1694          * i_size and to put the inode onto the orphan list; if
1695          * needed, the truncate itself will extend or restart the
1696          * transaction.
1697          */
1698         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1699                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1700
1701         inode = osd_dt_obj(dt)->oo_inode;
1702         LASSERT(inode);
1703
1704         rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1705                                    0, oh, true, true, NULL, false);
1706         RETURN(rc);
1707 }
1708
1709 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1710                      __u64 start, __u64 end, struct thandle *th,
1711                      struct lustre_capa *capa)
1712 {
1713         struct osd_thandle *oh;
1714         struct osd_object  *obj = osd_dt_obj(dt);
1715         struct inode       *inode = obj->oo_inode;
1716         handle_t           *h;
1717         tid_t               tid;
1718         int                 rc = 0, rc2 = 0;
1719         ENTRY;
1720
1721         LASSERT(end == OBD_OBJECT_EOF);
1722         LASSERT(dt_object_exists(dt));
1723         LASSERT(osd_invariant(obj));
1724         LASSERT(inode != NULL);
1725         ll_vfs_dq_init(inode);
1726
1727         LASSERT(th);
1728         oh = container_of(th, struct osd_thandle, ot_super);
1729         LASSERT(oh->ot_handle->h_transaction != NULL);
1730
1731         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
1732
1733         tid = oh->ot_handle->h_transaction->t_tid;
1734
1735         i_size_write(inode, start);
1736         ll_truncate_pagecache(inode, start);
1737 #ifdef HAVE_INODEOPS_TRUNCATE
1738         if (inode->i_op->truncate) {
1739                 inode->i_op->truncate(inode);
1740         } else
1741 #endif
1742                 ldiskfs_truncate(inode);
1743
1744         /*
1745          * For a partial-page truncate, flush the page to disk immediately to
1746          * avoid data corruption during direct disk write.  b=17397
1747          */
1748         if ((start & ~CFS_PAGE_MASK) != 0)
1749                 rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
1750
1751         h = journal_current_handle();
1752         LASSERT(h != NULL);
1753         LASSERT(h == oh->ot_handle);
1754
1755         if (tid != h->h_transaction->t_tid) {
1756                 int credits = oh->ot_credits;
1757                 /*
1758                  * the transaction has changed during truncate; make
1759                  * sure the handle still carries our declared credits
1760                  */
1761                 if (h->h_buffer_credits < credits) {
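                             /* try to extend the running handle in
                              * place; if the journal can't, restart it
                              * in a new transaction with the declared
                              * credits */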
1762                         if (ldiskfs_journal_extend(h, credits))
1763                                 rc2 = ldiskfs_journal_restart(h, credits);
1764                 }
1765         }
1766
1767         RETURN(rc == 0 ? rc2 : rc);
1768 }
1769
1770 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
1771                           struct ll_user_fiemap *fm)
1772 {
1773         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1774         struct osd_thread_info *info   = osd_oti_get(env);
1775         struct dentry          *dentry = &info->oti_obj_dentry;
1776         struct file            *file   = &info->oti_file;
1777         mm_segment_t            saved_fs;
1778         int rc;
1779
1780         LASSERT(inode);
1781         dentry->d_inode = inode;
1782         dentry->d_sb = inode->i_sb;
1783         file->f_dentry = dentry;
1784         file->f_mapping = inode->i_mapping;
1785         file->f_op = inode->i_fop;
1786         set_file_inode(file, inode);
1787
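             /* the ioctl path expects a user-space pointer, but fm is in
              * kernel space, so widen the address limit around the call */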
1788         saved_fs = get_fs();
1789         set_fs(get_ds());
1790         /* ldiskfs_ioctl does not have an inode argument */
1791         if (inode->i_fop->unlocked_ioctl)
1792                 rc = inode->i_fop->unlocked_ioctl(file, FSFILT_IOC_FIEMAP,
1793                                                   (long)fm);
1794         else
1795                 rc = -ENOTTY;
1796         set_fs(saved_fs);
1797         return rc;
1798 }
1799
1800 /*
1801  * in some cases we may need declare methods for objects being created
1802  * e.g., when we create symlink
1803  */
1804 const struct dt_body_operations osd_body_ops_new = {
1805         .dbo_declare_write = osd_declare_write,
1806 };
1807
1808 const struct dt_body_operations osd_body_ops = {
1809         .dbo_read                 = osd_read,
1810         .dbo_declare_write        = osd_declare_write,
1811         .dbo_write                = osd_write,
1812         .dbo_bufs_get             = osd_bufs_get,
1813         .dbo_bufs_put             = osd_bufs_put,
1814         .dbo_write_prep           = osd_write_prep,
1815         .dbo_declare_write_commit = osd_declare_write_commit,
1816         .dbo_write_commit         = osd_write_commit,
1817         .dbo_read_prep            = osd_read_prep,
1818         .dbo_declare_punch        = osd_declare_punch,
1819         .dbo_punch                = osd_punch,
1820         .dbo_fiemap_get           = osd_fiemap_get,
1821 };
1822