LU-2099 osd: clear iobuf up on I/O completion
[fs/lustre-release.git] lustre/osd-ldiskfs/osd_io.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2012, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/osd-ldiskfs/osd_io.c
37  *
38  * body operations
39  *
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
42  *
43  */
44
45 /* LUSTRE_VERSION_CODE */
46 #include <lustre_ver.h>
47 /* prerequisite for linux/xattr.h */
48 #include <linux/types.h>
49 /* prerequisite for linux/xattr.h */
50 #include <linux/fs.h>
51
52 /*
53  * struct OBD_{ALLOC,FREE}*()
54  * OBD_FAIL_CHECK
55  */
56 #include <obd_support.h>
57
58 #include "osd_internal.h"
59
60 /* ext_depth() */
61 #include <ldiskfs/ldiskfs_extents.h>
62
63 static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
64                             int rw, int line, int pages)
65 {
66         int blocks, i;
67
68         LASSERTF(iobuf->dr_elapsed_valid == 0,
69                  "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
70                  atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
71                  iobuf->dr_init_at);
72         LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
73
74         init_waitqueue_head(&iobuf->dr_wait);
75         atomic_set(&iobuf->dr_numreqs, 0);
76         iobuf->dr_npages = 0;
77         iobuf->dr_error = 0;
78         iobuf->dr_dev = d;
79         iobuf->dr_frags = 0;
80         iobuf->dr_elapsed = 0;
81         /* set these last: the assertions above report the previous dr_rw/dr_init_at */
82         iobuf->dr_rw = rw;
83         iobuf->dr_init_at = line;
84
85         blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
86         if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
87                 LASSERT(iobuf->dr_pg_buf.lb_len >=
88                         pages * sizeof(iobuf->dr_pages[0]));
89                 return 0;
90         }
91
92         /* start with 1MB for 4K blocks */
93         i = 256;
94         while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
95                 i <<= 1;
96
97         CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
98                (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
99         pages = i;
100         blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
101         iobuf->dr_max_pages = 0;
102         CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
103                (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
104
105         lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
106         iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
107         if (unlikely(iobuf->dr_blocks == NULL))
108                 return -ENOMEM;
109
110         lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
111         iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
112         if (unlikely(iobuf->dr_pages == NULL))
113                 return -ENOMEM;
114
115         iobuf->dr_max_pages = pages;
116
117         return 0;
118 }
119 #define osd_init_iobuf(dev, iobuf, rw, pages) \
120         __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
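/*
 * Sizing example (illustrative, assuming 4KB pages, a 4KB block size and
 * PTLRPC_MAX_BRW_PAGES of at least 512): a request for 300 pages does not
 * fit the initial 256-entry buffers, so the loop above doubles i from 256
 * to 512; dr_pages is then reallocated for 512 page pointers, dr_blocks
 * for 512 block numbers, and dr_max_pages becomes 512.
 */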
121
122 static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
123 {
124         LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
125         iobuf->dr_pages[iobuf->dr_npages++] = page;
126 }
127
128 void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
129 {
130         int rw = iobuf->dr_rw;
131
132         if (iobuf->dr_elapsed_valid) {
133                 iobuf->dr_elapsed_valid = 0;
134                 LASSERT(iobuf->dr_dev == d);
135                 LASSERT(iobuf->dr_frags > 0);
136                 lprocfs_oh_tally(&d->od_brw_stats.
137                                  hist[BRW_R_DIO_FRAGS+rw],
138                                  iobuf->dr_frags);
139                 lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
140                                       iobuf->dr_elapsed);
141         }
142 }
143
144 #ifndef REQ_WRITE /* pre-2.6.35 */
145 #define __REQ_WRITE BIO_RW
146 #endif
147
148 static void dio_complete_routine(struct bio *bio, int error)
149 {
150         struct osd_iobuf *iobuf = bio->bi_private;
151         struct bio_vec *bvl;
152         int i;
153
154         /* CAVEAT EMPTOR: possibly in IRQ context
155          * DO NOT record procfs stats here!!! */
156
157         if (unlikely(iobuf == NULL)) {
158                 CERROR("***** bio->bi_private is NULL!  This should never "
159                        "happen.  Normally, I would crash here, but instead I "
160                        "will dump the bio contents to the console.  Please "
161                        "report this to <http://jira.whamcloud.com/> , along "
162                        "with any interesting messages leading up to this point "
163                        "(like SCSI errors, perhaps).  Because bi_private is "
164                        "NULL, I can't wake up the thread that initiated this "
165                        "IO - you will probably have to reboot this node.\n");
166                 CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
167                        "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
168                        "bi_private: %p\n", bio->bi_next, bio->bi_flags,
169                        bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
170                        bio->bi_end_io, atomic_read(&bio->bi_cnt),
171                        bio->bi_private);
172                 return;
173         }
174
175         /* the check is outside of the loop for performance reasons -bzzz */
176         if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
177                 bio_for_each_segment(bvl, bio, i) {
178                         if (likely(error == 0))
179                                 SetPageUptodate(bvl->bv_page);
180                         LASSERT(PageLocked(bvl->bv_page));
181                 }
182                 atomic_dec(&iobuf->dr_dev->od_r_in_flight);
183         } else {
184                 atomic_dec(&iobuf->dr_dev->od_w_in_flight);
185         }
186
187         /* any real error is good enough -bzzz */
188         if (error != 0 && iobuf->dr_error == 0)
189                 iobuf->dr_error = error;
190
191         /*
192          * set dr_elapsed before dr_numreqs drops to 0, otherwise
193          * it's possible that the service thread will see dr_numreqs
194          * as zero while dr_elapsed is not set yet, so the elapsed
195          * time of this I/O would be lost and a later OSD call would
196          * trip an assertion.
197          */
198         if (atomic_read(&iobuf->dr_numreqs) == 1) {
199                 iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
200                 iobuf->dr_elapsed_valid = 1;
201         }
202         if (atomic_dec_and_test(&iobuf->dr_numreqs))
203                 wake_up(&iobuf->dr_wait);
204
205         /* Completed bios used to be chained off iobuf->dr_bios and freed in
206          * filter_clear_dreq().  It was then possible to exhaust the biovec-256
207          * mempool when serious on-disk fragmentation was encountered,
208          * deadlocking the OST.  The bios are now released as soon as complete
209          * so the pool cannot be exhausted while IOs are competing. bug 10076 */
210         bio_put(bio);
211 }
212
213 static void record_start_io(struct osd_iobuf *iobuf, int size)
214 {
215         struct osd_device    *osd = iobuf->dr_dev;
216         struct obd_histogram *h = osd->od_brw_stats.hist;
217
218         iobuf->dr_frags++;
219         atomic_inc(&iobuf->dr_numreqs);
220
221         if (iobuf->dr_rw == 0) {
222                 atomic_inc(&osd->od_r_in_flight);
223                 lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
224                                  atomic_read(&osd->od_r_in_flight));
225                 lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
226         } else if (iobuf->dr_rw == 1) {
227                 atomic_inc(&osd->od_w_in_flight);
228                 lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
229                                  atomic_read(&osd->od_w_in_flight));
230                 lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
231         } else {
232                 LBUG();
233         }
234 }
235
236 static void osd_submit_bio(int rw, struct bio *bio)
237 {
238         LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
239         if (rw == 0)
240                 submit_bio(READ, bio);
241         else
242                 submit_bio(WRITE, bio);
243 }
244
245 static int can_be_merged(struct bio *bio, sector_t sector)
246 {
247         unsigned int size;
248
249         if (!bio)
250                 return 0;
251
252         size = bio->bi_size >> 9;
253         return bio->bi_sector + size == sector ? 1 : 0;
254 }
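/*
 * e.g. a bio currently covering sectors 1000..1007 (bi_size == 4096) can
 * only be extended by a fragment that starts at sector 1008; anything else
 * forces osd_do_bio() to submit the current bio and allocate a new one.
 */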
255
256 static int osd_do_bio(struct osd_device *osd, struct inode *inode,
257                       struct osd_iobuf *iobuf)
258 {
259         int            blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
260         struct page  **pages = iobuf->dr_pages;
261         int            npages = iobuf->dr_npages;
262         unsigned long *blocks = iobuf->dr_blocks;
263         int            total_blocks = npages * blocks_per_page;
264         int            sector_bits = inode->i_sb->s_blocksize_bits - 9;
265         unsigned int   blocksize = inode->i_sb->s_blocksize;
266         struct bio    *bio = NULL;
267         struct page   *page;
268         unsigned int   page_offset;
269         sector_t       sector;
270         int            nblocks;
271         int            block_idx;
272         int            page_idx;
273         int            i;
274         int            rc = 0;
275         ENTRY;
276
277         LASSERT(iobuf->dr_npages == npages);
278
279         osd_brw_stats_update(osd, iobuf);
280         iobuf->dr_start_time = cfs_time_current();
281
282         for (page_idx = 0, block_idx = 0;
283              page_idx < npages;
284              page_idx++, block_idx += blocks_per_page) {
285
286                 page = pages[page_idx];
287                 LASSERT(block_idx + blocks_per_page <= total_blocks);
288
289                 for (i = 0, page_offset = 0;
290                      i < blocks_per_page;
291                      i += nblocks, page_offset += blocksize * nblocks) {
292
293                         nblocks = 1;
294
295                         if (blocks[block_idx + i] == 0) {  /* hole */
296                                 LASSERTF(iobuf->dr_rw == 0,
297                                          "page_idx %u, block_idx %u, i %u\n",
298                                          page_idx, block_idx, i);
299                                 memset(kmap(page) + page_offset, 0, blocksize);
300                                 kunmap(page);
301                                 continue;
302                         }
303
304                         sector = (sector_t)blocks[block_idx + i] << sector_bits;
305
306                         /* Additional contiguous file blocks? */
307                         while (i + nblocks < blocks_per_page &&
308                                (sector + (nblocks << sector_bits)) ==
309                                ((sector_t)blocks[block_idx + i + nblocks] <<
310                                 sector_bits))
311                                 nblocks++;
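                        /*
                         * Illustrative example, assuming 4KB pages and 1KB
                         * filesystem blocks (blocks_per_page == 4,
                         * sector_bits == 1): if a page maps to blocks
                         * 100..103, the loop above finds sectors 200, 202,
                         * 204, 206 contiguous, so nblocks grows to 4 and the
                         * whole page is added to the bio as one 4KB fragment.
                         */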
312
313                         if (bio != NULL &&
314                             can_be_merged(bio, sector) &&
315                             bio_add_page(bio, page,
316                                          blocksize * nblocks, page_offset) != 0)
317                                 continue;       /* added this frag OK */
318
319                         if (bio != NULL) {
320                                 struct request_queue *q =
321                                         bdev_get_queue(bio->bi_bdev);
322
323                                 /* Dang! I have to fragment this I/O */
324                                 CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
325                                        "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
326                                        bio->bi_size,
327                                        bio->bi_vcnt, bio->bi_max_vecs,
328                                        bio->bi_size >> 9, queue_max_sectors(q),
329                                        bio_phys_segments(q, bio),
330                                        queue_max_phys_segments(q),
331                                        0, queue_max_hw_segments(q));
332
333                                 record_start_io(iobuf, bio->bi_size);
334                                 osd_submit_bio(iobuf->dr_rw, bio);
335                         }
336
337                         /* allocate new bio */
338                         bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
339                                                       (npages - page_idx) *
340                                                       blocks_per_page));
341                         if (bio == NULL) {
342                                 CERROR("Can't allocate bio %u*%u = %u pages\n",
343                                        (npages - page_idx), blocks_per_page,
344                                        (npages - page_idx) * blocks_per_page);
345                                 rc = -ENOMEM;
346                                 goto out;
347                         }
348
349                         bio->bi_bdev = inode->i_sb->s_bdev;
350                         bio->bi_sector = sector;
351                         bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
352                         bio->bi_end_io = dio_complete_routine;
353                         bio->bi_private = iobuf;
354
355                         rc = bio_add_page(bio, page,
356                                           blocksize * nblocks, page_offset);
357                         LASSERT(rc != 0);
358                 }
359         }
360
361         if (bio != NULL) {
362                 record_start_io(iobuf, bio->bi_size);
363                 osd_submit_bio(iobuf->dr_rw, bio);
364                 rc = 0;
365         }
366
367 out:
368         /* in order to achieve better IO throughput, we don't wait for write
369          * completion here. instead we proceed with the transaction commit in
370          * parallel and wait for IO completion once the transaction is stopped;
371          * see osd_trans_stop() for more details -bzzz */
372         if (iobuf->dr_rw == 0) {
373                 wait_event(iobuf->dr_wait,
374                            atomic_read(&iobuf->dr_numreqs) == 0);
375                 osd_fini_iobuf(osd, iobuf);
376         }
377
378         if (rc == 0)
379                 rc = iobuf->dr_error;
380         RETURN(rc);
381 }
382
383 static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
384                                    struct niobuf_local *lnb)
385 {
386         ENTRY;
387
388         *nrpages = 0;
389
390         while (len > 0) {
391                 int poff = offset & (PAGE_CACHE_SIZE - 1);
392                 int plen = PAGE_CACHE_SIZE - poff;
393
394                 if (plen > len)
395                         plen = len;
396                 lnb->lnb_file_offset = offset;
397                 lnb->lnb_page_offset = poff;
398                 lnb->len = plen;
399                 /* lb->flags = rnb->flags; */
400                 lnb->flags = 0;
401                 lnb->page = NULL;
402                 lnb->rc = 0;
403
404                 LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
405                          (long long) len);
406                 offset += plen;
407                 len -= plen;
408                 lnb++;
409                 (*nrpages)++;
410         }
411
412         RETURN(0);
413 }
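/*
 * Worked example (illustrative, assuming 4KB pages): a request with
 * offset = 6144 and len = 10240 is split into three niobufs:
 *   lnb[0]: file_offset 6144,  page_offset 2048, len 2048
 *   lnb[1]: file_offset 8192,  page_offset 0,    len 4096
 *   lnb[2]: file_offset 12288, page_offset 0,    len 4096
 * so *nrpages ends up as 3, one entry per affected page.
 */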
414
415 struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw)
416 {
417         struct inode      *inode = osd_dt_obj(dt)->oo_inode;
418         struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
419         struct page       *page;
420
421         LASSERT(inode);
422
423         page = find_or_create_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
424                                    GFP_NOFS | __GFP_HIGHMEM);
425         if (unlikely(page == NULL))
426                 lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
427
428         return page;
429 }
430
431 /*
432  * There are the following "locks":
433  *   journal_start
434  *   i_mutex
435  *   page lock
436  *
437  * osd write path:
438  *   lock page(s)
439  *   journal_start
440  *   truncate_sem
441  *
442  * ext4 vmtruncate:
443  *   lock pages, unlock
444  *   journal_start
445  *   lock partial page
446  *   i_data_sem
447  *
448  */
449 int osd_bufs_get(const struct lu_env *env, struct dt_object *d, loff_t pos,
450                  ssize_t len, struct niobuf_local *lnb, int rw,
451                  struct lustre_capa *capa)
452 {
453         struct osd_object   *obj    = osd_dt_obj(d);
454         int npages, i, rc = 0;
455
456         LASSERT(obj->oo_inode);
457
458         osd_map_remote_to_local(pos, len, &npages, lnb);
459
460         for (i = 0; i < npages; i++, lnb++) {
461
462                 /* We still set up for ungranted pages so that granted pages
463                  * can be written to disk as they were promised, and portals
464                  * needs to keep the pages all aligned properly. */
465                 lnb->dentry = (void *) obj;
466
467                 lnb->page = osd_get_page(d, lnb->lnb_file_offset, rw);
468                 if (lnb->page == NULL)
469                         GOTO(cleanup, rc = -ENOMEM);
470
471                 /* DLM locking protects us from write and truncate competing
472                  * for the same region, but truncate can leave a dirty page
473                  * in the cache. it's possible that writeout on such a page
474                  * is in progress when we access it. it's also possible that
475                  * during this writeout we put new (partial) data, but then
476                  * won't be able to proceed in filter_commitrw_write(). thus
477                  * let's just wait for writeout completion, it should be rare
478                  * enough. -bzzz */
479                 wait_on_page_writeback(lnb->page);
480                 BUG_ON(PageWriteback(lnb->page));
481
482                 lu_object_get(&d->do_lu);
483         }
484         rc = i;
485
486 cleanup:
487         RETURN(rc);
488 }
489
490 static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
491                         struct niobuf_local *lnb, int npages)
492 {
493         int                     i;
494
495         for (i = 0; i < npages; i++) {
496                 if (lnb[i].page == NULL)
497                         continue;
498                 LASSERT(PageLocked(lnb[i].page));
499                 unlock_page(lnb[i].page);
500                 page_cache_release(lnb[i].page);
501                 lu_object_put(env, &dt->do_lu);
502                 lnb[i].page = NULL;
503         }
504         RETURN(0);
505 }
506
507 #ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
508 #define ldiskfs_ext_pblock(ex) ext_pblock((ex))
509 #endif
510
511 struct bpointers {
512         unsigned long *blocks;
513         unsigned long start;
514         int num;
515         int init_num;
516         int create;
517 };
518
519 static long ldiskfs_ext_find_goal(struct inode *inode,
520                                   struct ldiskfs_ext_path *path,
521                                   unsigned long block, int *aflags)
522 {
523         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
524         unsigned long bg_start;
525         unsigned long colour;
526         int depth;
527
528         if (path) {
529                 struct ldiskfs_extent *ex;
530                 depth = path->p_depth;
531
532                 /* try to predict block placement */
533                 if ((ex = path[depth].p_ext))
534                         return ldiskfs_ext_pblock(ex) +
535                                 (block - le32_to_cpu(ex->ee_block));
536
537                 /* it looks like the index is empty;
538                  * try to find starting from the index itself */
539                 if (path[depth].p_bh)
540                         return path[depth].p_bh->b_blocknr;
541         }
542
543         /* OK. use inode's group */
544         bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
545                 le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
546         colour = (current->pid % 16) *
547                 (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
548         return bg_start + colour + block;
549 }
550
551 static unsigned long new_blocks(handle_t *handle, struct inode *inode,
552                                 struct ldiskfs_ext_path *path,
553                                 unsigned long block, unsigned long *count,
554                                 int *err)
555 {
556         struct ldiskfs_allocation_request ar;
557         unsigned long pblock;
558         int aflags;
559
560         /* find neighbouring allocated blocks */
561         ar.lleft = block;
562         *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
563         if (*err)
564                 return 0;
565         ar.lright = block;
566         *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
567         if (*err)
568                 return 0;
569
570         /* allocate new block */
571         ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
572         ar.inode = inode;
573         ar.logical = block;
574         ar.len = *count;
575         ar.flags = LDISKFS_MB_HINT_DATA;
576         pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
577         *count = ar.len;
578         return pblock;
579 }
580
581 static int ldiskfs_ext_new_extent_cb(struct inode *inode,
582                                      struct ldiskfs_ext_path *path,
583                                      struct ldiskfs_ext_cache *cex,
584 #ifdef HAVE_EXT_PREPARE_CB_EXTENT
585                                      struct ldiskfs_extent *ex,
586 #endif
587                                      void *cbdata)
588 {
589         struct bpointers *bp = cbdata;
590         struct ldiskfs_extent nex;
591         unsigned long pblock;
592         unsigned long tgen;
593         int err, i;
594         unsigned long count;
595         handle_t *handle;
596
597 #ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
598         if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
599 #else
600         if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
601 #endif
602                 err = EXT_CONTINUE;
603                 goto map;
604         }
605
606         if (bp->create == 0) {
607                 i = 0;
608                 if (cex->ec_block < bp->start)
609                         i = bp->start - cex->ec_block;
610                 if (i >= cex->ec_len)
611                         CERROR("nothing to do?! i = %d, e_num = %u\n",
612                                         i, cex->ec_len);
613                 for (; i < cex->ec_len && bp->num; i++) {
614                         *(bp->blocks) = 0;
615                         bp->blocks++;
616                         bp->num--;
617                         bp->start++;
618                 }
619
620                 return EXT_CONTINUE;
621         }
622
623         tgen = LDISKFS_I(inode)->i_ext_generation;
624         count = ldiskfs_ext_calc_credits_for_insert(inode, path);
625
626         handle = ldiskfs_journal_start(inode, count + LDISKFS_ALLOC_NEEDED + 1);
627         if (IS_ERR(handle)) {
628                 return PTR_ERR(handle);
629         }
630
631         if (tgen != LDISKFS_I(inode)->i_ext_generation) {
632                 /* the tree has changed, so the path may now be invalid */
633                 ldiskfs_journal_stop(handle);
634                 return EXT_REPEAT;
635         }
636
637         /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback func is
638          * not protected by i_data_sem as a whole, so we patch it to store the
639          * generation in the path and verify here that the tree hasn't changed */
640         down_write((&LDISKFS_I(inode)->i_data_sem));
641
642         /* validate the extent: make sure the extent tree has not changed */
643         if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
644                 /* cex is invalid, try again */
645                 up_write(&LDISKFS_I(inode)->i_data_sem);
646                 ldiskfs_journal_stop(handle);
647                 return EXT_REPEAT;
648         }
649
650         count = cex->ec_len;
651         pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
652         if (!pblock)
653                 goto out;
654         BUG_ON(count > cex->ec_len);
655
656         /* insert new extent */
657         nex.ee_block = cpu_to_le32(cex->ec_block);
658         ldiskfs_ext_store_pblock(&nex, pblock);
659         nex.ee_len = cpu_to_le16(count);
660         err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
661         if (err) {
662                 /* free data blocks we just allocated */
663                 /* not a good idea to call discard here directly,
664                  * but otherwise we'd need to call it on every free() */
665                 ldiskfs_discard_preallocations(inode);
666 #ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
667                 ldiskfs_free_blocks(handle, inode, NULL, ldiskfs_ext_pblock(&nex),
668                                     cpu_to_le16(nex.ee_len), 0);
669 #else
670                 ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
671                                     cpu_to_le16(nex.ee_len), 0);
672 #endif
673                 goto out;
674         }
675
676         /*
677          * By storing the length of the extent we just inserted,
678          * we are asking ldiskfs_ext_walk_space() to continue
679          * scanning after that block
680          */
681         cex->ec_len = le16_to_cpu(nex.ee_len);
682         cex->ec_start = ldiskfs_ext_pblock(&nex);
683         BUG_ON(le16_to_cpu(nex.ee_len) == 0);
684         BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
685
686 out:
687         up_write((&LDISKFS_I(inode)->i_data_sem));
688         ldiskfs_journal_stop(handle);
689 map:
690         if (err >= 0) {
691                 /* map blocks */
692                 if (bp->num == 0) {
693                         CERROR("hmm. why do we find this extent?\n");
694                         CERROR("initial space: %lu:%u\n",
695                                 bp->start, bp->init_num);
696 #ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
697                         CERROR("current extent: %u/%u/%llu %d\n",
698                                 cex->ec_block, cex->ec_len,
699                                 (unsigned long long)cex->ec_start,
700                                 cex->ec_type);
701 #else
702                         CERROR("current extent: %u/%u/%llu\n",
703                                 cex->ec_block, cex->ec_len,
704                                 (unsigned long long)cex->ec_start);
705 #endif
706                 }
707                 i = 0;
708                 if (cex->ec_block < bp->start)
709                         i = bp->start - cex->ec_block;
710                 if (i >= cex->ec_len)
711                         CERROR("nothing to do?! i = %d, e_num = %u\n",
712                                         i, cex->ec_len);
713                 for (; i < cex->ec_len && bp->num; i++) {
714                         *(bp->blocks) = cex->ec_start + i;
715 #ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
716                         if (cex->ec_type != LDISKFS_EXT_CACHE_EXTENT) {
717 #else
718                         if ((cex->ec_len == 0) || (cex->ec_start == 0)) {
719 #endif
720                                 /* unmap any possible underlying metadata from
721                                  * the block device mapping.  bug 6998. */
722                                 unmap_underlying_metadata(inode->i_sb->s_bdev,
723                                                           *(bp->blocks));
724                         }
725                         bp->blocks++;
726                         bp->num--;
727                         bp->start++;
728                 }
729         }
730         return err;
731 }
732
733 int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long block,
734                             unsigned long num, unsigned long *blocks,
735                             int create)
736 {
737         struct bpointers bp;
738         int err;
739
740         CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
741                block, block + num - 1, (unsigned) inode->i_ino);
742
743         bp.blocks = blocks;
744         bp.start = block;
745         bp.init_num = bp.num = num;
746         bp.create = create;
747
748         err = ldiskfs_ext_walk_space(inode, block, num,
749                                          ldiskfs_ext_new_extent_cb, &bp);
750         ldiskfs_ext_invalidate_cache(inode);
751
752         return err;
753 }
754
755 int osd_ldiskfs_map_ext_inode_pages(struct inode *inode, struct page **page,
756                                     int pages, unsigned long *blocks,
757                                     int create)
758 {
759         int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
760         int rc = 0, i = 0;
761         struct page *fp = NULL;
762         int clen = 0;
763
764         CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
765                 inode->i_ino, pages, (*page)->index);
766
767         /* pages are sorted already. so, we just have to find
768          * contig. space and process them properly */
769         while (i < pages) {
770                 if (fp == NULL) {
771                         /* start new extent */
772                         fp = *page++;
773                         clen = 1;
774                         i++;
775                         continue;
776                 } else if (fp->index + clen == (*page)->index) {
777                         /* continue the extent */
778                         page++;
779                         clen++;
780                         i++;
781                         continue;
782                 }
783
784                 /* process found extent */
785                 rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
786                                              clen * blocks_per_page, blocks,
787                                              create);
788                 if (rc)
789                         GOTO(cleanup, rc);
790
791                 /* look for next extent */
792                 fp = NULL;
793                 blocks += blocks_per_page * clen;
794         }
795
796         if (fp)
797                 rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
798                                              clen * blocks_per_page, blocks,
799                                              create);
800 cleanup:
801         return rc;
802 }
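/*
 * Illustrative example: with pages whose indexes are 0, 1, 2, 7, 8 the loop
 * above issues two osd_ldiskfs_map_nblocks() calls, one for the run starting
 * at index 0 with clen == 3 and (after the loop) one for the run starting at
 * index 7 with clen == 2; "blocks" advances by blocks_per_page * clen between
 * the runs so every page gets its own slice of the block array.
 */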
803
804 int osd_ldiskfs_map_bm_inode_pages(struct inode *inode, struct page **page,
805                                    int pages, unsigned long *blocks,
806                                    int create)
807 {
808         int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
809         unsigned long *b;
810         int rc = 0, i;
811
812         for (i = 0, b = blocks; i < pages; i++, page++) {
813                 rc = ldiskfs_map_inode_page(inode, *page, b, create);
814                 if (rc) {
815                         CERROR("ino %lu, blk %lu create %d: rc %d\n",
816                                inode->i_ino, *b, create, rc);
817                         break;
818                 }
819
820                 b += blocks_per_page;
821         }
822         return rc;
823 }
824
825 static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
826                                        int pages, unsigned long *blocks,
827                                        int create, struct mutex *optional_mutex)
828 {
829         int rc;
830
831         if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
832                 rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
833                                                      blocks, create);
834                 return rc;
835         }
836         if (optional_mutex != NULL)
837                 mutex_lock(optional_mutex);
838         rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);
839         if (optional_mutex != NULL)
840                 mutex_unlock(optional_mutex);
841
842         return rc;
843 }
844
845 static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
846                           struct niobuf_local *lnb, int npages)
847 {
848         struct osd_thread_info *oti   = osd_oti_get(env);
849         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
850         struct inode           *inode = osd_dt_obj(dt)->oo_inode;
851         struct osd_device      *osd   = osd_obj2dev(osd_dt_obj(dt));
852         struct timeval          start;
853         struct timeval          end;
854         unsigned long           timediff;
855         ssize_t                 isize;
856         __s64                   maxidx;
857         int                     rc = 0;
858         int                     i;
859         int                     cache = 0;
860
861         LASSERT(inode);
862
863         rc = osd_init_iobuf(osd, iobuf, 0, npages);
864         if (unlikely(rc != 0))
865                 RETURN(rc);
866
867         isize = i_size_read(inode);
868         maxidx = ((isize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 1;
869
870         if (osd->od_writethrough_cache)
871                 cache = 1;
872         if (isize > osd->od_readcache_max_filesize)
873                 cache = 0;
874
875         do_gettimeofday(&start);
876         for (i = 0; i < npages; i++) {
877
878                 if (cache == 0)
879                         generic_error_remove_page(inode->i_mapping,
880                                                   lnb[i].page);
881
882                 /*
883                  * until commit, the content of the page is undefined;
884                  * we'll set it uptodate once the bulk is done. otherwise
885                  * subsequent reads could access non-stable data
886                  */
887                 ClearPageUptodate(lnb[i].page);
888
889                 if (lnb[i].len == PAGE_CACHE_SIZE)
890                         continue;
891
892                 if (maxidx >= lnb[i].page->index) {
893                         osd_iobuf_add_page(iobuf, lnb[i].page);
894                 } else {
895                         long off;
896                         char *p = kmap(lnb[i].page);
897
898                         off = lnb[i].lnb_page_offset;
899                         if (off)
900                                 memset(p, 0, off);
901                         off = (lnb[i].lnb_page_offset + lnb[i].len) &
902                               ~CFS_PAGE_MASK;
903                         if (off)
904                                 memset(p + off, 0, PAGE_CACHE_SIZE - off);
905                         kunmap(lnb[i].page);
906                 }
907         }
908         do_gettimeofday(&end);
909         timediff = cfs_timeval_sub(&end, &start, NULL);
910         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
911
912         if (iobuf->dr_npages) {
913                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
914                                                  iobuf->dr_npages,
915                                                  iobuf->dr_blocks,
916                                                  0, NULL);
917                 if (likely(rc == 0)) {
918                         rc = osd_do_bio(osd, inode, iobuf);
919                         /* do IO stats for preparation reads */
920                         osd_fini_iobuf(osd, iobuf);
921                 }
922         }
923         RETURN(rc);
924 }
925
926 /* Check if a block is allocated or not */
927 static int osd_is_mapped(struct inode *inode, obd_size offset)
928 {
929         sector_t (*fs_bmap)(struct address_space *, sector_t);
930
931         fs_bmap = inode->i_mapping->a_ops->bmap;
932
933         /* We can't know if we are overwriting or not */
934         if (unlikely(fs_bmap == NULL))
935                 return 0;
936
937         if (i_size_read(inode) == 0)
938                 return 0;
939
940         /* Beyond EOF, must not be mapped */
941         if (((i_size_read(inode) - 1) >> inode->i_blkbits) <
942             (offset >> inode->i_blkbits))
943                 return 0;
944
945         if (fs_bmap(inode->i_mapping, offset >> inode->i_blkbits) == 0)
946                 return 0;
947
948         return 1;
949 }
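/*
 * For example (assuming 4KB blocks), for offset == 8192 the helper asks
 * ->bmap() for logical block 2; a return value of 0 means the block is a
 * hole (not yet allocated), so a write there consumes new space and must
 * be declared against quota in osd_declare_write_commit() below.
 */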
950
951 static int osd_declare_write_commit(const struct lu_env *env,
952                                     struct dt_object *dt,
953                                     struct niobuf_local *lnb, int npages,
954                                     struct thandle *handle)
955 {
956         const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
957         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
958         struct osd_thandle      *oh;
959         int                      extents = 1;
960         int                      depth;
961         int                      i;
962         int                      newblocks;
963         int                      rc = 0;
964         int                      flags = 0;
965         bool                     ignore_quota = false;
966         long long                quota_space = 0;
967         ENTRY;
968
969         LASSERT(handle != NULL);
970         oh = container_of0(handle, struct osd_thandle, ot_super);
971         LASSERT(oh->ot_handle == NULL);
972
973         newblocks = npages;
974
975         /* calculate number of extents (probably better to pass nb) */
976         for (i = 0; i < npages; i++) {
977                 if (i && lnb[i].lnb_file_offset !=
978                     lnb[i - 1].lnb_file_offset + lnb[i - 1].len)
979                         extents++;
980
981                 if (!osd_is_mapped(inode, lnb[i].lnb_file_offset))
982                         quota_space += PAGE_CACHE_SIZE;
983
984                 /* ignore quota for the whole request if any page is from
985                  * client cache or written by root.
986                  *
987                  * XXX once we drop the 1.8 client support, the checking
988                  * for whether page is from cache can be simplified as:
989                  * !(lnb[i].flags & OBD_BRW_SYNC)
990                  *
991                  * XXX we could handle this on per-lnb basis as done by
992                  * grant. */
993                 if ((lnb[i].flags & OBD_BRW_NOQUOTA) ||
994                     (lnb[i].flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
995                     OBD_BRW_FROM_GRANT)
996                         ignore_quota = true;
997         }
998
999         /*
1000          * each extent can go into a new leaf, causing a split;
1001          * 5 is the max tree depth: inode + 4 index blocks
1002          * with blockmaps, the depth is 3 at most
1003          */
1004         if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
1005                 /*
1006                  * many concurrent threads may grow the tree by the time
1007                  * our transaction starts, so consider 2 as the minimum depth
1008                  */
1009                 depth = ext_depth(inode);
1010                 depth = max(depth, 1) + 1;
1011                 newblocks += depth;
1012                 oh->ot_credits++; /* inode */
1013                 oh->ot_credits += depth * 2 * extents;
1014         } else {
1015                 depth = 3;
1016                 newblocks += depth;
1017                 oh->ot_credits++; /* inode */
1018                 oh->ot_credits += depth * extents;
1019         }
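        /*
         * Worked example (illustrative): for a 1MB write of 256 x 4KB pages
         * forming one contiguous extent on an inode with ext_depth() == 1,
         * the extent case above uses depth = 2, so newblocks = 256 + 2 = 258
         * and this function adds 1 (inode) + 2 * 2 * 1 = 5 credits so far;
         * the bitmap and group-descriptor credits added below contribute at
         * most another 2 * 258, capped by s_groups_count and s_gdb_count.
         */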
1020
1021         /* quota space for metadata blocks */
1022         quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));
1023
1024         /* quota space should be reported in 1K blocks */
1025         quota_space = toqb(quota_space);
1026
1027         /* each new block can go into a different group (bitmap + gd) */
1028
1029         /* we can't dirty more bitmap blocks than exist */
1030         if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
1031                 oh->ot_credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
1032         else
1033                 oh->ot_credits += newblocks;
1034
1035         /* we can't dirty more gd blocks than exist */
1036         if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
1037                 oh->ot_credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
1038         else
1039                 oh->ot_credits += newblocks;
1040
1041         /* make sure the over quota flags were not set */
1042         lnb[0].flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA);
1043
1044         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid,
1045                                    quota_space, oh, true, true, &flags,
1046                                    ignore_quota);
1047
1048         /* we only need to store the overquota flags in the first lnb for
1049          * now; once we support multiple-object BRW, this code needs to be
1050          * revised. */
1051         if (flags & QUOTA_FL_OVER_USRQUOTA)
1052                 lnb[0].flags |= OBD_BRW_OVER_USRQUOTA;
1053         if (flags & QUOTA_FL_OVER_GRPQUOTA)
1054                 lnb[0].flags |= OBD_BRW_OVER_GRPQUOTA;
1055
1056         RETURN(rc);
1057 }
1058
1059 /* Map pages to blocks, update i_size and submit the write I/O */
1060 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
1061                             struct niobuf_local *lnb, int npages,
1062                             struct thandle *thandle)
1063 {
1064         struct osd_thread_info *oti = osd_oti_get(env);
1065         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1066         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1067         struct osd_device  *osd = osd_obj2dev(osd_dt_obj(dt));
1068         loff_t isize;
1069         int rc = 0, i;
1070
1071         LASSERT(inode);
1072
1073         rc = osd_init_iobuf(osd, iobuf, 1, npages);
1074         if (unlikely(rc != 0))
1075                 RETURN(rc);
1076
1077         isize = i_size_read(inode);
1078         ll_vfs_dq_init(inode);
1079
1080         for (i = 0; i < npages; i++) {
1081                 if (lnb[i].rc == -ENOSPC &&
1082                     osd_is_mapped(inode, lnb[i].lnb_file_offset)) {
1083                         /* Allow the write to proceed if overwriting an
1084                          * existing block */
1085                         lnb[i].rc = 0;
1086                 }
1087
1088                 if (lnb[i].rc) { /* ENOSPC, network RPC error, etc. */
1089                         CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
1090                                lnb[i].rc);
1091                         LASSERT(lnb[i].page);
1092                         generic_error_remove_page(inode->i_mapping,lnb[i].page);
1093                         continue;
1094                 }
1095
1096                 LASSERT(PageLocked(lnb[i].page));
1097                 LASSERT(!PageWriteback(lnb[i].page));
1098
1099                 if (lnb[i].lnb_file_offset + lnb[i].len > isize)
1100                         isize = lnb[i].lnb_file_offset + lnb[i].len;
1101
1102                 /*
1103                  * Since write and truncate are serialized by oo_sem, even
1104                  * partial-page truncate should not leave dirty pages in the
1105                  * page cache.
1106                  */
1107                 LASSERT(!PageDirty(lnb[i].page));
1108
1109                 SetPageUptodate(lnb[i].page);
1110
1111                 osd_iobuf_add_page(iobuf, lnb[i].page);
1112         }
1113
1114         if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
1115                 rc = -ENOSPC;
1116         } else if (iobuf->dr_npages > 0) {
1117                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1118                                                  iobuf->dr_npages,
1119                                                  iobuf->dr_blocks,
1120                                                  1, NULL);
1121         } else {
1122                 /* no pages to write, no transno is needed */
1123                 thandle->th_local = 1;
1124         }
1125
1126         if (likely(rc == 0)) {
1127                 if (isize > i_size_read(inode)) {
1128                         i_size_write(inode, isize);
1129                         LDISKFS_I(inode)->i_disksize = isize;
1130                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1131                 }
1132
1133                 rc = osd_do_bio(osd, inode, iobuf);
1134                 /* we don't do stats here as in the read path because
1135                  * write is async: we'll do this in osd_bufs_put() */
1136         } else {
1137                 osd_fini_iobuf(osd, iobuf);
1138         }
1139
1140         if (unlikely(rc != 0)) {
1141                 /* if write fails, we should drop pages from the cache */
1142                 for (i = 0; i < npages; i++) {
1143                         if (lnb[i].page == NULL)
1144                                 continue;
1145                         LASSERT(PageLocked(lnb[i].page));
1146                         generic_error_remove_page(inode->i_mapping,lnb[i].page);
1147                 }
1148         }
1149
1150         RETURN(rc);
1151 }
1152
1153 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1154                          struct niobuf_local *lnb, int npages)
1155 {
1156         struct osd_thread_info *oti = osd_oti_get(env);
1157         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1158         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1159         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1160         struct timeval start, end;
1161         unsigned long timediff;
1162         int rc = 0, i, m = 0, cache = 0;
1163
1164         LASSERT(inode);
1165
1166         rc = osd_init_iobuf(osd, iobuf, 0, npages);
1167         if (unlikely(rc != 0))
1168                 RETURN(rc);
1169
1170         if (osd->od_read_cache)
1171                 cache = 1;
1172         if (i_size_read(inode) > osd->od_readcache_max_filesize)
1173                 cache = 0;
1174
1175         do_gettimeofday(&start);
1176         for (i = 0; i < npages; i++) {
1177
1178                 if (i_size_read(inode) <= lnb[i].lnb_file_offset)
1179                         /* If there's no more data, abort early.
1180                          * lnb->rc == 0, so it's easy to detect later. */
1181                         break;
1182
1183                 if (i_size_read(inode) <
1184                     lnb[i].lnb_file_offset + lnb[i].len - 1)
1185                         lnb[i].rc = i_size_read(inode) - lnb[i].lnb_file_offset;
1186                 else
1187                         lnb[i].rc = lnb[i].len;
1188                 m += lnb[i].len;
1189
1190                 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS, 1);
1191                 if (PageUptodate(lnb[i].page)) {
1192                         lprocfs_counter_add(osd->od_stats,
1193                                             LPROC_OSD_CACHE_HIT, 1);
1194                 } else {
1195                         lprocfs_counter_add(osd->od_stats,
1196                                             LPROC_OSD_CACHE_MISS, 1);
1197                         osd_iobuf_add_page(iobuf, lnb[i].page);
1198                 }
1199                 if (cache == 0)
1200                         generic_error_remove_page(inode->i_mapping,lnb[i].page);
1201         }
1202         do_gettimeofday(&end);
1203         timediff = cfs_timeval_sub(&end, &start, NULL);
1204         lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1205
1206         if (iobuf->dr_npages) {
1207                 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1208                                                  iobuf->dr_npages,
1209                                                  iobuf->dr_blocks,
1210                                                  0, NULL);
1211                 rc = osd_do_bio(osd, inode, iobuf);
1212
1213                 /* IO stats will be done in osd_bufs_put() */
1214         }
1215
1216         RETURN(rc);
1217 }
1218
1219 /*
1220  * XXX: Another layering violation for now.
1221  *
1222  * We don't want to use ->f_op->read/write methods, because the generic file write path
1223  *
1224  *         - serializes on ->i_sem, and
1225  *
1226  *         - does a lot of extra work like balance_dirty_pages(),
1227  *
1228  * which doesn't work for globally shared files like /last_rcvd.
1229  */
1230 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1231 {
1232         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1233
1234         memcpy(buffer, (char *)ei->i_data, buflen);
1235
1236         return  buflen;
1237 }
1238
1239 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1240 {
1241         struct buffer_head *bh;
1242         unsigned long block;
1243         int osize;
1244         int blocksize;
1245         int csize;
1246         int boffs;
1247         int err;
1248
1249         /* prevent reading after eof */
1250         spin_lock(&inode->i_lock);
1251         if (i_size_read(inode) < *offs + size) {
1252                 loff_t diff = i_size_read(inode) - *offs;
1253                 spin_unlock(&inode->i_lock);
1254                 if (diff < 0) {
1255                         CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
1256                                i_size_read(inode), *offs);
1257                         return -EBADR;
1258                 } else if (diff == 0) {
1259                         return 0;
1260                 } else {
1261                         size = diff;
1262                 }
1263         } else {
1264                 spin_unlock(&inode->i_lock);
1265         }
1266
1267         blocksize = 1 << inode->i_blkbits;
1268         osize = size;
1269         while (size > 0) {
1270                 block = *offs >> inode->i_blkbits;
1271                 boffs = *offs & (blocksize - 1);
1272                 csize = min(blocksize - boffs, size);
1273                 bh = ldiskfs_bread(NULL, inode, block, 0, &err);
1274                 if (!bh) {
1275                         CERROR("%s: can't read %u@%llu on ino %lu: rc = %d\n",
1276                                LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
1277                                csize, *offs, inode->i_ino, err);
1278                         return err;
1279                 }
1280
1281                 memcpy(buf, bh->b_data + boffs, csize);
1282                 brelse(bh);
1283
1284                 *offs += csize;
1285                 buf += csize;
1286                 size -= csize;
1287         }
1288         return osize;
1289 }
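/*
 * Worked example (illustrative, assuming a 4KB block size): reading 100
 * bytes at *offs == 5000 within i_size computes block 1, boffs 904 and
 * csize min(4096 - 904, 100) == 100, so a single ldiskfs_bread() of block 1
 * satisfies the request and *offs advances to 5100.
 */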
1290
1291 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1292                         struct lu_buf *buf, loff_t *pos,
1293                         struct lustre_capa *capa)
1294 {
1295         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1296         int           rc;
1297
1298         if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
1299                 RETURN(-EACCES);
1300
1301         /* Read small symlink from inode body as we need to maintain correct
1302          * on-disk symlinks for ldiskfs.
1303          */
1304         if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1305             (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1306                 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1307         else
1308                 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1309
1310         return rc;
1311 }
1312
1313 static inline int osd_extents_enabled(struct super_block *sb,
1314                                       struct inode *inode)
1315 {
1316         if (inode != NULL) {
1317                 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1318                         return 1;
1319         } else if (test_opt(sb, EXTENTS)) {
1320                 return 1;
1321         }
1322         return 0;
1323 }
1324
1325 static inline int osd_calc_bkmap_credits(struct super_block *sb,
1326                                          struct inode *inode,
1327                                          const loff_t size,
1328                                          const loff_t pos,
1329                                          const int blocks)
1330 {
1331         int credits, bits, bs, i;
1332
1333         bits = sb->s_blocksize_bits;
1334         bs = 1 << bits;
1335
1336         /* legacy blockmap: 3 levels * 3 (bitmap, gd, itself)
1337          * we do not expect blockmaps on large files,
1338          * so let's shrink it to 2 levels (4GB files) */
1339
1340         /* this is the default reservation: 2 levels */
1341         credits = (blocks + 2) * 3;
1342
1343         /* actual offset is unknown, hard to optimize */
1344         if (pos == -1)
1345                 return credits;
1346
1347         /* now check a few specific cases to optimize */
1348         if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1349                 /* no indirects */
1350                 credits = blocks;
1351                 /* allocate if not allocated */
1352                 if (inode == NULL) {
1353                         credits += blocks * 2;
1354                         return credits;
1355                 }
1356                 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1357                         LASSERT(i < LDISKFS_NDIR_BLOCKS);
1358                         if (LDISKFS_I(inode)->i_data[i] == 0)
1359                                 credits += 2;
1360                 }
1361         } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1362                 /* single indirect */
1363                 credits = blocks * 3;
1364                 /* probably indirect block has been allocated already */
1365                 if (!inode || LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK])
1366                         credits += 3;
1367         }
1368
1369         return credits;
1370 }
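/*
 * Worked example (illustrative, assuming 4KB blocks): a 2-block write at
 * pos == 0 on an existing block-mapped inode falls into the "no indirects"
 * case, so credits starts at 2; if both i_data[0] and i_data[1] are still
 * zero (unallocated), each adds 2 more (bitmap + group descriptor), giving
 * 6 credits in total.
 */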
1371
1372 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1373                                  const struct lu_buf *buf, loff_t _pos,
1374                                  struct thandle *handle)
1375 {
1376         struct osd_object  *obj  = osd_dt_obj(dt);
1377         struct inode       *inode = obj->oo_inode;
1378         struct super_block *sb = osd_sb(osd_obj2dev(obj));
1379         struct osd_thandle *oh;
1380         int                 rc = 0, est = 0, credits, blocks, allocated = 0;
1381         int                 bits, bs;
1382         int                 depth, size;
1383         loff_t              pos;
1384         ENTRY;
1385
1386         LASSERT(buf != NULL);
1387         LASSERT(handle != NULL);
1388
1389         oh = container_of0(handle, struct osd_thandle, ot_super);
1390         LASSERT(oh->ot_handle == NULL);
1391
1392         size = buf->lb_len;
1393         bits = sb->s_blocksize_bits;
1394         bs = 1 << bits;
1395
1396         if (_pos == -1) {
1397                 /* if this is an append, then we
1398                  * should expect a cross-block record */
1399                 pos = 0;
1400         } else {
1401                 pos = _pos;
1402         }
1403
1404         /* blocks to modify */
1405         blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1406         LASSERT(blocks > 0);
1407
1408         if (inode != NULL && _pos != -1) {
1409                 /* object size in blocks */
1410                 est = (i_size_read(inode) + bs - 1) >> bits;
1411                 allocated = inode->i_blocks >> (bits - 9);
1412                 if (pos + size <= i_size_read(inode) && est <= allocated) {
1413                         /* looks like an overwrite, no need to modify tree */
1414                         credits = blocks;
1415                         /* no need to modify i_size */
1416                         goto out;
1417                 }
1418         }
1419
1420         if (osd_extents_enabled(sb, inode)) {
1421                 /*
1422                  * many concurrent threads may grow the tree by the time
1423                  * our transaction starts, so consider 2 the minimum depth.
1424                  * for every level we may need to allocate a new block
1425                  * and take some entries from the old one: 3 blocks
1426                  * to allocate (bitmap, gd, itself) + the old block = 4 per
1427                  * level. a worked example follows this function.
1428                  */
1429                 depth = inode != NULL ? ext_depth(inode) : 0;
1430                 depth = max(depth, 1) + 1;
1431                 credits = depth;
1432                 /* if not an append, then a split may need to modify
1433                  * existing blocks, moving entries into the new ones */
1434                 if (_pos == -1)
1435                         credits += depth;
1436                 /* blocks to store data: bitmap,gd,itself */
1437                 credits += blocks * 3;
1438         } else {
1439                 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1440         }
1441         /* if the inode is created as part of this transaction, then the
1442          * inode update credit is already counted by the creation method */
1443         if (inode != NULL)
1444                 credits++;
1445
1446 out:
1447
1448         osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1449
1450         /* dt_declare_write() is usually called for system objects, such
1451          * as llog or last_rcvd files. We needn't enforce quota on those
1452          * objects, so always set lqi_space to 0. */
1453         if (inode != NULL)
1454                 rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid,
1455                                            0, oh, true, true, NULL, false);
1456         RETURN(rc);
1457 }
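/*
 * Illustrative example (editorial sketch, not part of the original source):
 * for an extent-mapped inode with ext_depth() == 1, appending 3 blocks
 * (_pos == -1) gives depth = max(1, 1) + 1 = 2 credits, plus another 2 for
 * the append case, plus 3 * 3 = 9 for the data blocks and 1 for the inode
 * update: 14 credits declared before quota is accounted.
 */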
1458
1459 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1460 {
1461         /* LU-2634: clear the extent format for fast symlink */
1462         ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1463
1464         memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1465         LDISKFS_I(inode)->i_disksize = buflen;
1466         i_size_write(inode, buflen);
1467         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1468
1469         return 0;
1470 }
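/*
 * Editorial note: this is the ldiskfs "fast symlink" layout -- the target
 * string is stored directly in the inode's i_data[] array (sizeof(i_data),
 * typically 60 bytes), so no data block or extent tree is needed; that is
 * also why the extents flag is cleared above (LU-2634).
 */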
1471
1472 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
1473                              int write_NUL, loff_t *offs, handle_t *handle)
1474 {
1475         struct buffer_head *bh        = NULL;
1476         loff_t              offset    = *offs;
1477         loff_t              new_size  = i_size_read(inode);
1478         unsigned long       block;
1479         int                 blocksize = 1 << inode->i_blkbits;
1480         int                 err = 0;
1481         int                 size;
1482         int                 boffs;
1483         int                 dirty_inode = 0;
1484
1485         if (write_NUL) {
1486                 /*
1487                  * a long symlink write does not count the NUL terminator in
1488                  * bufsize; we write it out here, and the inode's file size
1489                  * does not count the NUL terminator either.
1490                  */
1491                 ((char *)buf)[bufsize] = '\0';
1492                 ++bufsize;
1493         }
1494         while (bufsize > 0) {
1495                 if (bh != NULL)
1496                         brelse(bh);
1497
1498                 block = offset >> inode->i_blkbits;
1499                 boffs = offset & (blocksize - 1);
1500                 size = min(blocksize - boffs, bufsize);
1501                 bh = ldiskfs_bread(handle, inode, block, 1, &err);
1502                 if (!bh) {
1503                         CERROR("%s: error reading offset %llu (block %lu): "
1504                                "rc = %d\n",
1505                                inode->i_sb->s_id, offset, block, err);
1506                         break;
1507                 }
1508
1509                 err = ldiskfs_journal_get_write_access(handle, bh);
1510                 if (err) {
1511                         CERROR("journal_get_write_access() returned error %d\n",
1512                                err);
1513                         break;
1514                 }
1515                 LASSERTF(boffs + size <= bh->b_size,
1516                          "boffs %d size %d bh->b_size %lu",
1517                          boffs, size, (unsigned long)bh->b_size);
1518                 memcpy(bh->b_data + boffs, buf, size);
1519                 err = ldiskfs_journal_dirty_metadata(handle, bh);
1520                 if (err)
1521                         break;
1522
1523                 if (offset + size > new_size)
1524                         new_size = offset + size;
1525                 offset += size;
1526                 bufsize -= size;
1527                 buf += size;
1528         }
1529         if (bh)
1530                 brelse(bh);
1531
1532         if (write_NUL)
1533                 --new_size;
1534         /* correct in-core and on-disk sizes */
1535         if (new_size > i_size_read(inode)) {
1536                 spin_lock(&inode->i_lock);
1537                 if (new_size > i_size_read(inode))
1538                         i_size_write(inode, new_size);
1539                 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
1540                         LDISKFS_I(inode)->i_disksize = i_size_read(inode);
1541                         dirty_inode = 1;
1542                 }
1543                 spin_unlock(&inode->i_lock);
1544                 if (dirty_inode)
1545                         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1546         }
1547
1548         if (err == 0)
1549                 *offs = offset;
1550         return err;
1551 }
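/*
 * Illustrative example (editorial sketch, not part of the original source):
 * with 4KB blocks, a 6000-byte record written at offset 3000 is split by
 * the loop above into three chunks -- 1096 bytes in block 0, 4096 bytes in
 * block 1 and 808 bytes in block 2 -- each copied into a journaled
 * buffer_head under ldiskfs_journal_get_write_access() and marked dirty
 * with ldiskfs_journal_dirty_metadata().
 */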
1552
1553 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1554                          const struct lu_buf *buf, loff_t *pos,
1555                          struct thandle *handle, struct lustre_capa *capa,
1556                          int ignore_quota)
1557 {
1558         struct inode            *inode = osd_dt_obj(dt)->oo_inode;
1559         struct osd_thandle      *oh;
1560         ssize_t                 result;
1561         int                     is_link;
1562
1563         LASSERT(dt_object_exists(dt));
1564
1565         if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_WRITE))
1566                 return -EACCES;
1567
1568         LASSERT(handle != NULL);
1569         LASSERT(inode != NULL);
1570         ll_vfs_dq_init(inode);
1571
1572         /* XXX: don't check: one declared chunk can be used many times */
1573         /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
1574
1575         oh = container_of(handle, struct osd_thandle, ot_super);
1576         LASSERT(oh->ot_handle->h_transaction != NULL);
1577         /* Write a small symlink to the inode body, as we need to maintain
1578          * correct on-disk symlinks for ldiskfs.
1579          * Note: buf->lb_buf contains a NUL terminator while buf->lb_len
1580          * does not include it.
1581          */
1582         is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1583         if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1584                 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1585         else
1586                 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
1587                                                   buf->lb_len, is_link, pos,
1588                                                   oh->ot_handle);
1589         if (result == 0)
1590                 result = buf->lb_len;
1591         return result;
1592 }
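/*
 * Editorial note: on success the full buf->lb_len is returned; *pos is
 * advanced by osd_ldiskfs_write_record() for regular objects and long
 * symlinks, while the fast-symlink path above leaves it untouched.
 */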
1593
1594 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1595                              __u64 start, __u64 end, struct thandle *th)
1596 {
1597         struct osd_thandle *oh;
1598         struct inode       *inode;
1599         int                 rc;
1600         ENTRY;
1601
1602         LASSERT(th);
1603         oh = container_of(th, struct osd_thandle, ot_super);
1604
1605         /*
1606          * we don't need to reserve credits for the whole truncate:
1607          * that isn't possible, as truncate may need to free too many
1608          * blocks to fit in a single transaction. instead we reserve
1609          * credits to change i_size and to put the inode onto the
1610          * orphan list. if needed, truncate will extend or restart
1611          * the transaction.
1612          */
1613         osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1614                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1615
1616         inode = osd_dt_obj(dt)->oo_inode;
1617         LASSERT(inode);
1618
1619         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
1620                                    true, true, NULL, false);
1621         RETURN(rc);
1622 }
1623
1624 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1625                      __u64 start, __u64 end, struct thandle *th,
1626                      struct lustre_capa *capa)
1627 {
1628         struct osd_thandle *oh;
1629         struct osd_object  *obj = osd_dt_obj(dt);
1630         struct inode       *inode = obj->oo_inode;
1631         handle_t           *h;
1632         tid_t               tid;
1633         int                rc = 0, rc2 = 0;
1634         ENTRY;
1635
1636         LASSERT(end == OBD_OBJECT_EOF);
1637         LASSERT(dt_object_exists(dt));
1638         LASSERT(osd_invariant(obj));
1639         LASSERT(inode != NULL);
1640         ll_vfs_dq_init(inode);
1641
1642         LASSERT(th);
1643         oh = container_of(th, struct osd_thandle, ot_super);
1644         LASSERT(oh->ot_handle->h_transaction != NULL);
1645
1646         osd_trans_exec_op(env, th, OSD_OT_PUNCH);
1647
1648         tid = oh->ot_handle->h_transaction->t_tid;
1649
1650         i_size_write(inode, start);
1651         ll_truncate_pagecache(inode, start);
1652 #ifdef HAVE_INODEOPS_TRUNCATE
1653         if (inode->i_op->truncate) {
1654                 inode->i_op->truncate(inode);
1655         } else
1656 #endif
1657                 ldiskfs_truncate(inode);
1658
1659         /*
1660          * For a partial-page truncate, flush the page to disk immediately to
1661          * avoid data corruption during direct disk write.  b=17397
1662          */
1663         if ((start & ~CFS_PAGE_MASK) != 0)
1664                 rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
1665
1666         h = journal_current_handle();
1667         LASSERT(h != NULL);
1668         LASSERT(h == oh->ot_handle);
1669
1670         if (tid != h->h_transaction->t_tid) {
1671                 int credits = oh->ot_credits;
1672                 /*
1673                  * the transaction has changed during truncate;
1674                  * we need to restart the handle with our credits
1675                  */
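                /* editorial note: ldiskfs_journal_extend() is expected to
                 * follow the jbd2 convention of returning 0 when the handle
                 * was extended in place, so a full restart is attempted only
                 * when in-place extension fails */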
1676                 if (h->h_buffer_credits < credits) {
1677                         if (ldiskfs_journal_extend(h, credits))
1678                                 rc2 = ldiskfs_journal_restart(h, credits);
1679                 }
1680         }
1681
1682         RETURN(rc == 0 ? rc2 : rc);
1683 }
1684
1685 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
1686                           struct ll_user_fiemap *fm)
1687 {
1688         struct inode *inode = osd_dt_obj(dt)->oo_inode;
1689         struct osd_thread_info *info   = osd_oti_get(env);
1690         struct dentry          *dentry = &info->oti_obj_dentry;
1691         struct file            *file   = &info->oti_file;
1692         mm_segment_t            saved_fs;
1693         int rc;
1694
1695         LASSERT(inode);
1696         dentry->d_inode = inode;
1697         dentry->d_sb = inode->i_sb;
1698         file->f_dentry = dentry;
1699         file->f_mapping = inode->i_mapping;
1700         file->f_op = inode->i_fop;
1701         set_file_inode(file, inode);
1702
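        /* Editorial note: switching the address limit to the kernel data
         * segment via set_fs(get_ds()) lets the ioctl handler accept the
         * kernel-space fiemap buffer passed in 'fm', which would otherwise
         * fail its user-pointer checks. */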
1703         saved_fs = get_fs();
1704         set_fs(get_ds());
1705         /* ldiskfs_ioctl does not have an inode argument */
1706         if (inode->i_fop->unlocked_ioctl)
1707                 rc = inode->i_fop->unlocked_ioctl(file, FSFILT_IOC_FIEMAP,
1708                                                   (long)fm);
1709         else
1710                 rc = -ENOTTY;
1711         set_fs(saved_fs);
1712         return rc;
1713 }
1714
1715 /*
1716  * in some cases we may need the declare methods for objects being created,
1717  * e.g., when we create a symlink
1718  */
1719 const struct dt_body_operations osd_body_ops_new = {
1720         .dbo_declare_write = osd_declare_write,
1721 };
1722
1723 const struct dt_body_operations osd_body_ops = {
1724         .dbo_read                 = osd_read,
1725         .dbo_declare_write        = osd_declare_write,
1726         .dbo_write                = osd_write,
1727         .dbo_bufs_get             = osd_bufs_get,
1728         .dbo_bufs_put             = osd_bufs_put,
1729         .dbo_write_prep           = osd_write_prep,
1730         .dbo_declare_write_commit = osd_declare_write_commit,
1731         .dbo_write_commit         = osd_write_commit,
1732         .dbo_read_prep            = osd_read_prep,
1733         .dbo_declare_punch        = osd_declare_punch,
1734         .dbo_punch                = osd_punch,
1735         .dbo_fiemap_get           = osd_fiemap_get,
1736 };
1737