/*
 * Whamcloud - gitweb
 * 8ca8f19e62e4b5f460f6a321b12bd9ed9cffb5ed
 * [fs/lustre-release.git] / lustre / lvfs / fsfilt_ext3.c
 */
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/lvfs/fsfilt_ext3.c
37  *
38  * Author: Andreas Dilger <adilger@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_FILTER
42
43 #include <linux/init.h>
44 #include <linux/module.h>
45 #include <linux/fs.h>
46 #include <linux/slab.h>
47 #include <linux/pagemap.h>
48 #include <ldiskfs/ldiskfs_config.h>
49 #include <ext4/ext4.h>
50 #include <ext4/ext4_jbd2.h>
51 #include <linux/version.h>
52 #include <linux/bitops.h>
53 #include <linux/quota.h>
54
55 #include <libcfs/libcfs.h>
56 #include <lustre_fsfilt.h>
57 #include <obd.h>
58 #include <linux/lustre_compat25.h>
59 #include <linux/lprocfs_status.h>
60
61 #include <ext4/ext4_extents.h>
62
63 #ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
64 #define ext3_ext_pblock(ex) ext_pblock((ex))
65 #endif
66
67 /* for kernels 2.6.18 and later */
68 #define fsfilt_ext3_ext_insert_extent(handle, inode, path, newext, flag) \
69                ext3_ext_insert_extent(handle, inode, path, newext, flag)
70
71 #define ext3_mb_discard_inode_preallocations(inode) \
72                  ext3_discard_preallocations(inode)
73
74 #ifndef EXT3_EXTENTS_FL
75 #define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
76 #endif
77
78 #ifndef EXT_ASSERT
79 #define EXT_ASSERT(cond)  BUG_ON(!(cond))
80 #endif
81
82 #define EXT_GENERATION(inode)           (EXT4_I(inode)->i_ext_generation)
83 #define ext3_ext_base                   inode
84 #define ext3_ext_base2inode(inode)      (inode)
85 #define EXT_DEPTH(inode)                ext_depth(inode)
86 #define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
87                         ext3_ext_walk_space(inode, block, num, cb, cbdata);
88
/* Walk cursor handed to ext3_ext_new_extent_cb() via the cbdata pointer
 * while walking an inode's extent space in fsfilt_map_nblocks(). */
struct bpointers {
        unsigned long *blocks;  /* output array receiving physical block numbers */
        unsigned long start;    /* next logical block to be mapped */
        int num;                /* blocks still left to map */
        int init_num;           /* original request size, kept for error reports */
        int create;             /* non-zero: allocate blocks for holes */
};
96
97 static long ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
98                                unsigned long block, int *aflags)
99 {
100         struct ext3_inode_info *ei = EXT3_I(inode);
101         unsigned long bg_start;
102         unsigned long colour;
103         int depth;
104
105         if (path) {
106                 struct ext3_extent *ex;
107                 depth = path->p_depth;
108
109                 /* try to predict block placement */
110                 if ((ex = path[depth].p_ext))
111                         return ext4_ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));
112
113                 /* it looks index is empty
114                  * try to find starting from index itself */
115                 if (path[depth].p_bh)
116                         return path[depth].p_bh->b_blocknr;
117         }
118
119         /* OK. use inode's group */
120         bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
121                 le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
122         colour = (current->pid % 16) *
123                 (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
124         return bg_start + colour + block;
125 }
126
127 #define ll_unmap_underlying_metadata(sb, blocknr) \
128         unmap_underlying_metadata((sb)->s_bdev, blocknr)
129
#ifndef EXT3_MB_HINT_GROUP_ALLOC
/* Allocate up to *count blocks for logical @block via the old mballoc API
 * (kernels without struct ext3_allocation_request).  Returns the first
 * physical block, or 0 with *err set on failure. */
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        unsigned long pblock, goal;
        int aflags = 0;
        struct inode *inode = ext3_ext_base2inode(base);

        goal = ext3_ext_find_goal(inode, path, block, &aflags);
        aflags |= 2; /* block have been already reserved */
        pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
        return pblock;

}
#else
/* Allocate up to *count blocks for logical @block via the allocation-request
 * mballoc API.  Neighbour extents are located first so mballoc can place the
 * new blocks adjacently.  On success *count is updated to the length actually
 * allocated; returns 0 with *err set on failure. */
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = EXT3_MB_HINT_DATA;
        pblock = ext3_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}
#endif
176
177 static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
178                                   struct ext3_ext_path *path,
179                                   struct ext3_ext_cache *cex,
180 #ifdef HAVE_EXT_PREPARE_CB_EXTENT
181                                    struct ext3_extent *ex,
182 #endif
183                                   void *cbdata)
184 {
185         struct bpointers *bp = cbdata;
186         struct inode *inode = ext3_ext_base2inode(base);
187         struct ext3_extent nex;
188         unsigned long pblock;
189         unsigned long tgen;
190         int err, i;
191         unsigned long count;
192         handle_t *handle;
193
194 #ifdef EXT3_EXT_CACHE_EXTENT
195         if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
196 #else
197         if ((cex->ec_len != 0) && (cex->ec_start != 0))
198 #endif
199                                                    {
200                 err = EXT_CONTINUE;
201                 goto map;
202         }
203
204         if (bp->create == 0) {
205                 i = 0;
206                 if (cex->ec_block < bp->start)
207                         i = bp->start - cex->ec_block;
208                 if (i >= cex->ec_len)
209                         CERROR("nothing to do?! i = %d, e_num = %u\n",
210                                         i, cex->ec_len);
211                 for (; i < cex->ec_len && bp->num; i++) {
212                         *(bp->blocks) = 0;
213                         bp->blocks++;
214                         bp->num--;
215                         bp->start++;
216                 }
217
218                 return EXT_CONTINUE;
219         }
220
221         tgen = EXT_GENERATION(base);
222         count = ext3_ext_calc_credits_for_insert(base, path);
223
224         handle = ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
225         if (IS_ERR(handle)) {
226                 return PTR_ERR(handle);
227         }
228
229         if (tgen != EXT_GENERATION(base)) {
230                 /* the tree has changed. so path can be invalid at moment */
231                 ext3_journal_stop(handle);
232                 return EXT_REPEAT;
233         }
234
235         /* In 2.6.32 kernel, ext4_ext_walk_space()'s callback func is not
236          * protected by i_data_sem as whole. so we patch it to store
237          * generation to path and now verify the tree hasn't changed */
238         down_write((&EXT4_I(inode)->i_data_sem));
239
240         /* validate extent, make sure the extent tree does not changed */
241         if (EXT_GENERATION(base) != path[0].p_generation) {
242                 /* cex is invalid, try again */
243                 up_write(&EXT4_I(inode)->i_data_sem);
244                 ext3_journal_stop(handle);
245                 return EXT_REPEAT;
246         }
247
248         count = cex->ec_len;
249         pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
250         if (!pblock)
251                 goto out;
252         EXT_ASSERT(count <= cex->ec_len);
253
254         /* insert new extent */
255         nex.ee_block = cpu_to_le32(cex->ec_block);
256         ext3_ext_store_pblock(&nex, pblock);
257         nex.ee_len = cpu_to_le16(count);
258         err = fsfilt_ext3_ext_insert_extent(handle, base, path, &nex, 0);
259         if (err) {
260                 /* free data blocks we just allocated */
261                 /* not a good idea to call discard here directly,
262                  * but otherwise we'd need to call it every free() */
263 #ifdef EXT3_MB_HINT_GROUP_ALLOC
264                 ext3_mb_discard_inode_preallocations(inode);
265 #endif
266 #ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
267                 ext3_free_blocks(handle, inode, NULL, ext4_ext_pblock(&nex),
268                                  cpu_to_le16(nex.ee_len), 0);
269 #else
270                 ext3_free_blocks(handle, inode, ext4_ext_pblock(&nex),
271                                  cpu_to_le16(nex.ee_len), 0);
272 #endif
273                 goto out;
274         }
275
276         /*
277          * Putting len of the actual extent we just inserted,
278          * we are asking ext3_ext_walk_space() to continue
279          * scaning after that block
280          */
281         cex->ec_len = le16_to_cpu(nex.ee_len);
282         cex->ec_start = ext4_ext_pblock(&nex);
283         BUG_ON(le16_to_cpu(nex.ee_len) == 0);
284         BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
285
286 out:
287         up_write((&EXT4_I(inode)->i_data_sem));
288         ext3_journal_stop(handle);
289 map:
290         if (err >= 0) {
291                 /* map blocks */
292                 if (bp->num == 0) {
293                         CERROR("hmm. why do we find this extent?\n");
294                         CERROR("initial space: %lu:%u\n",
295                                 bp->start, bp->init_num);
296 #ifdef EXT3_EXT_CACHE_EXTENT
297                         CERROR("current extent: %u/%u/%llu %d\n",
298                                 cex->ec_block, cex->ec_len,
299                                 (unsigned long long)cex->ec_start,
300                                 cex->ec_type);
301 #else
302                         CERROR("current extent: %u/%u/%llu\n",
303                                 cex->ec_block, cex->ec_len,
304                                 (unsigned long long)cex->ec_start);
305 #endif
306                 }
307                 i = 0;
308                 if (cex->ec_block < bp->start)
309                         i = bp->start - cex->ec_block;
310                 if (i >= cex->ec_len)
311                         CERROR("nothing to do?! i = %d, e_num = %u\n",
312                                         i, cex->ec_len);
313                 for (; i < cex->ec_len && bp->num; i++) {
314                         *(bp->blocks) = cex->ec_start + i;
315 #ifdef EXT3_EXT_CACHE_EXTENT
316                         if (cex->ec_type != EXT3_EXT_CACHE_EXTENT)
317 #else
318                         if ((cex->ec_len == 0) || (cex->ec_start == 0))
319 #endif
320                                                                         {
321                                 /* unmap any possible underlying metadata from
322                                  * the block device mapping.  bug 6998. */
323                                 ll_unmap_underlying_metadata(inode->i_sb,
324                                                              *(bp->blocks));
325                         }
326                         bp->blocks++;
327                         bp->num--;
328                         bp->start++;
329                 }
330         }
331         return err;
332 }
333
334 int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
335                        unsigned long num, unsigned long *blocks,
336                        int create)
337 {
338         struct ext3_ext_base *base = inode;
339         struct bpointers bp;
340         int err;
341
342         CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
343                block, block + num - 1, (unsigned) inode->i_ino);
344
345         bp.blocks = blocks;
346         bp.start = block;
347         bp.init_num = bp.num = num;
348         bp.create = create;
349
350         err = fsfilt_ext3_ext_walk_space(base, block, num,
351                                          ext3_ext_new_extent_cb, &bp);
352         ext3_ext_invalidate_cache(base);
353
354         return err;
355 }
356
/*
 * Map @pages pages of an extent-based inode to physical blocks.
 *
 * @page is sorted by page index; runs of index-contiguous pages are
 * coalesced into a single fsfilt_map_nblocks() call so the extent tree
 * is walked once per contiguous range rather than once per page.
 * @blocks receives blocks_per_page entries per page, in page order.
 * Returns 0 on success or the first negative errno from mapping.
 */
int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int create)
{
        int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;   /* first page of the current run */
        int clen = 0;             /* length (in pages) of the current run */

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }

        /* flush the final run (loop above only maps on discontinuity) */
        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        create);
cleanup:
        return rc;
}
405
406 int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
407                                    int pages, unsigned long *blocks,
408                                    int create)
409 {
410         int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
411         unsigned long *b;
412         int rc = 0, i;
413
414         for (i = 0, b = blocks; i < pages; i++, page++) {
415                 rc = ext3_map_inode_page(inode, *page, b, create);
416                 if (rc) {
417                         CERROR("ino %lu, blk %lu create %d: rc %d\n",
418                                inode->i_ino, *b, create, rc);
419                         break;
420                 }
421
422                 b += blocks_per_page;
423         }
424         return rc;
425 }
426
427 int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
428                                 int pages, unsigned long *blocks,
429                                 int create, struct mutex *optional_mutex)
430 {
431         int rc;
432
433         if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
434                 rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
435                                                      blocks, create);
436                 return rc;
437         }
438         if (optional_mutex != NULL)
439                 mutex_lock(optional_mutex);
440         rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks, create);
441         if (optional_mutex != NULL)
442                 mutex_unlock(optional_mutex);
443
444         return rc;
445 }
446
/* Operations vector registered with the fsfilt layer; only page-to-block
 * mapping is provided by this helper module. */
static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
};
452
/* Module entry point: register the ext3 fsfilt operations. */
static int __init fsfilt_ext3_init(void)
{
        return fsfilt_register_ops(&fsfilt_ext3_ops);
}
457
/* Module exit point: unregister the ext3 fsfilt operations. */
static void __exit fsfilt_ext3_exit(void)
{
        fsfilt_unregister_ops(&fsfilt_ext3_ops);
}
462
463 module_init(fsfilt_ext3_init);
464 module_exit(fsfilt_ext3_exit);
465
466 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
467 MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
468 MODULE_LICENSE("GPL");