1 Subject: [PATCH] ext4-fiemap-kernel-data
4 ext4_iomap_to_fiemap from iomap_to_fiemap
5 ext4_iomap_fiemap from iomap_fiemap
6 ext4_fiemap_fill_next_extent from fiemap_fill_next_extent
7 and changes ext4_fiemap_fill_next_extent to conditionally
8 use memcpy instead of copy_to_user.
10 Signed-off-by: shaun.tancheff@hpe.com
14 fs/ext4/extents.c | 239 +++++++++++++++++++++++++++++++++++++++++++++-
15 2 files changed, 238 insertions(+), 4 deletions(-)
17 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
18 index c4d59e5..0739f10 100644
21 @@ -772,6 +772,9 @@ enum {
23 #define EXT4_FIEMAP_EXTENT_HOLE 0x08000000
25 +/* Otherwise-unused fi_flags bit: tells ext4 to use memcpy instead of copy_[to|from]_user */
26 +#define EXT4_FIEMAP_FLAG_MEMCPY 0x80000000
28 /* Max physical block we can address w/o extents */
29 #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
31 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
32 index c148bb9..b97a328 100644
33 --- a/fs/ext4/extents.c
34 +++ b/fs/ext4/extents.c
35 @@ -2175,6 +2175,238 @@ cleanup:
40 +#define ext4_iomap_fiemap(i, f, s, l, ops) \
41 + iomap_fiemap((i), (f), (s), (l), (ops))
45 + * ext4_fiemap_fill_next_extent <--- fiemap_fill_next_extent
46 + * _ext4_iomap_fiemap <------------- iomap_fiemap
47 + * ext4_iomap_to_fiemap <----------- iomap_to_fiemap
50 + * ext4_fiemap_fill_next_extent - Fiemap helper function
51 + * @fieinfo: Fiemap context passed into ->fiemap
52 + * @logical: Extent logical start offset, in bytes
53 + * @phys: Extent physical start offset, in bytes
54 + * @len: Extent length, in bytes
55 + * @flags: FIEMAP_EXTENT flags that describe this extent
57 + * Called from file system ->fiemap callback. Will populate extent
58 + * info as passed in via arguments and copy to user memory. On
59 + * success, extent count on fieinfo is incremented.
61 + * Returns 0 on success, -errno on error, 1 if this was the last
62 + * extent that will fit in user array.
64 +#define SET_UNKNOWN_FLAGS (FIEMAP_EXTENT_DELALLOC)
65 +#define SET_NO_UNMOUNTED_IO_FLAGS (FIEMAP_EXTENT_DATA_ENCRYPTED)
66 +#define SET_NOT_ALIGNED_FLAGS (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
67 +static int ext4_fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo,
68 + u64 logical, u64 phys, u64 len,
71 + struct fiemap_extent extent;
72 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
74 + /* only count the extents */
75 + if (fieinfo->fi_extents_max == 0) {
76 + fieinfo->fi_extents_mapped++;
77 + return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
80 + if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
83 + if (flags & SET_UNKNOWN_FLAGS)
84 + flags |= FIEMAP_EXTENT_UNKNOWN;
85 + if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
86 + flags |= FIEMAP_EXTENT_ENCODED;
87 + if (flags & SET_NOT_ALIGNED_FLAGS)
88 + flags |= FIEMAP_EXTENT_NOT_ALIGNED;
90 + memset(&extent, 0, sizeof(extent));
91 + extent.fe_logical = logical;
92 + extent.fe_physical = phys;
93 + extent.fe_length = len;
94 + extent.fe_flags = flags;
96 + dest += fieinfo->fi_extents_mapped;
97 + if (fieinfo->fi_flags & EXT4_FIEMAP_FLAG_MEMCPY)
98 + memcpy((__force void *)dest, &extent, sizeof(extent));
99 + else if (copy_to_user(dest, &extent, sizeof(extent)))
102 + fieinfo->fi_extents_mapped++;
103 + if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
105 + return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
108 +static int ext4_iomap_to_fiemap(struct fiemap_extent_info *fi,
109 + struct iomap *iomap, u32 flags)
111 + switch (iomap->type) {
115 + case IOMAP_DELALLOC:
116 + flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
120 + case IOMAP_UNWRITTEN:
121 + flags |= FIEMAP_EXTENT_UNWRITTEN;
124 + flags |= FIEMAP_EXTENT_DATA_INLINE;
128 + if (iomap->flags & IOMAP_F_MERGED)
129 + flags |= FIEMAP_EXTENT_MERGED;
130 + if (iomap->flags & IOMAP_F_SHARED)
131 + flags |= FIEMAP_EXTENT_SHARED;
133 + return ext4_fiemap_fill_next_extent(fi, iomap->offset,
134 + iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
135 + iomap->length, flags);
139 +static inline int iomap_iter_advance(struct iomap_iter *iter)
141 + /* handle the previous iteration (if any) */
142 + if (iter->iomap.length) {
143 + if (iter->processed <= 0)
144 + return iter->processed;
145 + if (WARN_ON_ONCE(iter->processed > iomap_length(iter)))
147 + iter->pos += iter->processed;
148 + iter->len -= iter->processed;
153 + /* clear the state for the next iteration */
154 + iter->processed = 0;
155 + memset(&iter->iomap, 0, sizeof(iter->iomap));
156 + memset(&iter->srcmap, 0, sizeof(iter->srcmap));
160 +static inline void iomap_iter_done(struct iomap_iter *iter)
162 + WARN_ON_ONCE(iter->iomap.offset > iter->pos);
163 + WARN_ON_ONCE(iter->iomap.length == 0);
164 + WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);
165 + WARN_ON_ONCE(iter->srcmap.type != IOMAP_HOLE);
169 + * iomap_iter - iterate over ranges in a file
170 + * @iter: iteration structure
171 + * @ops: iomap ops provided by the file system
173 + * Iterate over filesystem-provided space mappings for the provided file range.
175 + * This function handles cleanup of resources acquired for iteration when the
176 + * filesystem indicates there are no more space mappings, which means that this
177 + * function must be called in a loop that continues as long as it returns a
178 + * positive value. If 0 or a negative value is returned, the caller must not
179 + * return to the loop body. Within a loop body, there are two ways to break out
180 + * of the loop body: leave @iter.processed unchanged, or set it to a negative
183 +int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops)
187 + if (iter->iomap.length && ops->iomap_end) {
188 + ret = ops->iomap_end(iter->inode, iter->pos, iomap_length(iter),
189 + iter->processed > 0 ? iter->processed : 0,
190 + iter->flags, &iter->iomap);
191 + if (ret < 0 && !iter->processed)
195 + ret = iomap_iter_advance(iter);
199 + ret = ops->iomap_begin(iter->inode, iter->pos, iter->len, iter->flags,
200 + &iter->iomap, &iter->srcmap);
203 + iomap_iter_done(iter);
207 +static loff_t iomap_fiemap_iter(const struct iomap_iter *iter,
208 + struct fiemap_extent_info *fi, struct iomap *prev)
212 + if (iter->iomap.type == IOMAP_HOLE)
213 + return iomap_length(iter);
215 + ret = ext4_iomap_to_fiemap(fi, prev, 0);
216 + *prev = iter->iomap;
218 + case 0: /* success */
219 + return iomap_length(iter);
220 + case 1: /* extent array full */
222 + default: /* error */
228 +int _ext4_iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
229 + u64 start, u64 len, const struct iomap_ops *ops)
231 + struct iomap_iter iter = {
235 + .flags = IOMAP_REPORT,
237 + struct iomap prev = {
238 + .type = IOMAP_HOLE,
241 + bool in_kernel = fi->fi_flags & EXT4_FIEMAP_FLAG_MEMCPY;
243 + fi->fi_flags &= ~EXT4_FIEMAP_FLAG_MEMCPY;
244 + ret = fiemap_prep(inode, fi, start, &iter.len, 0);
246 + fi->fi_flags |= EXT4_FIEMAP_FLAG_MEMCPY;
251 + while ((ret = iomap_iter(&iter, ops)) > 0)
252 + iter.processed = iomap_fiemap_iter(&iter, fi, &prev);
254 + if (prev.type != IOMAP_HOLE) {
255 + ret = ext4_iomap_to_fiemap(fi, &prev, FIEMAP_EXTENT_LAST);
261 + /* inode with no (attribute) mapping will give ENOENT */
262 + if (ret < 0 && ret != -ENOENT)
267 +#define ext4_iomap_fiemap(i, f, s, l, ops) \
268 + _ext4_iomap_fiemap((i), (f), (s), (l), (ops))
269 +#endif /* KERNEL_DS */
271 static int ext4_fill_es_cache_info(struct inode *inode,
272 ext4_lblk_t block, ext4_lblk_t num,
273 struct fiemap_extent_info *fieinfo)
274 @@ -4959,11 +5191,10 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
276 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
277 fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
278 - return iomap_fiemap(inode, fieinfo, start, len,
279 - &ext4_iomap_xattr_ops);
280 + return ext4_iomap_fiemap(inode, fieinfo, start, len,
281 + &ext4_iomap_xattr_ops);
284 - return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
285 + return ext4_iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
288 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,