LU-16756 kernel: RHEL 9.2 server support
[fs/lustre-release.git] ldiskfs/kernel_patches/patches/rhel9.2/ext4-fiemap-kernel-data.patch
Subject: [PATCH] ext4-fiemap-kernel-data

The patch clones:
  ext4_iomap_to_fiemap from iomap_to_fiemap
  ext4_iomap_fiemap from iomap_fiemap
  ext4_fiemap_fill_next_extent from fiemap_fill_next_extent
and changes ext4_fiemap_fill_next_extent to conditionally
use memcpy instead of copy_to_user.

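For illustration only (not part of the diff below): a minimal sketch of how
an in-kernel caller, such as Lustre osd-ldiskfs, might use the new flag so
ext4 fills a kernel-resident extent array. The inode, start, len and
max_extents names here are hypothetical:

    /*
     * Hypothetical caller sketch: EXT4_FIEMAP_FLAG_MEMCPY makes
     * ext4_fiemap_fill_next_extent() memcpy() each extent into
     * fi_extents_start instead of calling copy_to_user().
     */
    struct fiemap_extent *kern_extents;
    struct fiemap_extent_info fieinfo;
    int rc;

    kern_extents = kmalloc_array(max_extents, sizeof(*kern_extents), GFP_NOFS);
    if (!kern_extents)
            return -ENOMEM;

    fieinfo.fi_flags = EXT4_FIEMAP_FLAG_MEMCPY;
    fieinfo.fi_extents_max = max_extents;
    fieinfo.fi_extents_mapped = 0;
    fieinfo.fi_extents_start = (struct fiemap_extent __user *)kern_extents;

    /* ends up in ext4_fiemap() -> _ext4_iomap_fiemap() below */
    rc = inode->i_op->fiemap(inode, &fieinfo, start, len);
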
Signed-off-by: Shaun Tancheff <shaun.tancheff@hpe.com>

---
 fs/ext4/ext4.h    |   3 +
 fs/ext4/extents.c | 239 +++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 238 insertions(+), 4 deletions(-)

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c4d59e5..0739f10 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -772,6 +772,9 @@ enum {
  */
 #define EXT4_FIEMAP_EXTENT_HOLE                0x08000000
 
+/* Otherwise unused fi_flags bit: ext4 uses memcpy instead of copy_[to|from]_user */
+#define EXT4_FIEMAP_FLAG_MEMCPY                0x80000000
+
 /* Max physical block we can address w/o extents */
 #define EXT4_MAX_BLOCK_FILE_PHYS       0xFFFFFFFF
 
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c148bb9..b97a328 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2175,6 +2175,238 @@ cleanup:
        return err;
 }
 
+#ifdef KERNEL_DS
+#define ext4_iomap_fiemap(i, f, s, l, ops) \
+       iomap_fiemap((i), (f), (s), (l), (ops))
+#else
+/*
+ * linux:
+ *   ext4_fiemap_fill_next_extent <--- fiemap_fill_next_extent
+ *   _ext4_iomap_fiemap <------------- iomap_fiemap
+ *   ext4_iomap_to_fiemap <----------- iomap_to_fiemap
+ */
+/**
+ * ext4_fiemap_fill_next_extent - Fiemap helper function
+ * @fieinfo:   Fiemap context passed into ->fiemap
+ * @logical:   Extent logical start offset, in bytes
+ * @phys:      Extent physical start offset, in bytes
+ * @len:       Extent length, in bytes
+ * @flags:     FIEMAP_EXTENT flags that describe this extent
+ *
+ * Called from the file system ->fiemap callback. Populates the extent
+ * info passed in via arguments and copies it to the destination array
+ * (user or kernel memory). On success, extent count on fieinfo is incremented.
+ *
+ * Returns 0 on success, -errno on error, 1 if this was the last
+ * extent that will fit in the destination array.
+ */
+#define SET_UNKNOWN_FLAGS      (FIEMAP_EXTENT_DELALLOC)
+#define SET_NO_UNMOUNTED_IO_FLAGS      (FIEMAP_EXTENT_DATA_ENCRYPTED)
+#define SET_NOT_ALIGNED_FLAGS  (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
+static int ext4_fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo,
+                                       u64 logical, u64 phys, u64 len,
+                                       u32 flags)
+{
+       struct fiemap_extent extent;
+       struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
+
+       /* only count the extents */
+       if (fieinfo->fi_extents_max == 0) {
+               fieinfo->fi_extents_mapped++;
+               return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
+       }
+
+       if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
+               return 1;
+
+       if (flags & SET_UNKNOWN_FLAGS)
+               flags |= FIEMAP_EXTENT_UNKNOWN;
+       if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
+               flags |= FIEMAP_EXTENT_ENCODED;
+       if (flags & SET_NOT_ALIGNED_FLAGS)
+               flags |= FIEMAP_EXTENT_NOT_ALIGNED;
+
+       memset(&extent, 0, sizeof(extent));
+       extent.fe_logical = logical;
+       extent.fe_physical = phys;
+       extent.fe_length = len;
+       extent.fe_flags = flags;
+
+       dest += fieinfo->fi_extents_mapped;
+       if (fieinfo->fi_flags & EXT4_FIEMAP_FLAG_MEMCPY)
+               memcpy((__force void *)dest, &extent, sizeof(extent));
+       else if (copy_to_user(dest, &extent, sizeof(extent)))
+               return -EFAULT;
+
+       fieinfo->fi_extents_mapped++;
+       if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
+               return 1;
+       return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
+}
+
+static int ext4_iomap_to_fiemap(struct fiemap_extent_info *fi,
+                               struct iomap *iomap, u32 flags)
+{
+       switch (iomap->type) {
+       case IOMAP_HOLE:
+               /* skip holes */
+               return 0;
+       case IOMAP_DELALLOC:
+               flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
+               break;
+       case IOMAP_MAPPED:
+               break;
+       case IOMAP_UNWRITTEN:
+               flags |= FIEMAP_EXTENT_UNWRITTEN;
+               break;
+       case IOMAP_INLINE:
+               flags |= FIEMAP_EXTENT_DATA_INLINE;
+               break;
+       }
+
+       if (iomap->flags & IOMAP_F_MERGED)
+               flags |= FIEMAP_EXTENT_MERGED;
+       if (iomap->flags & IOMAP_F_SHARED)
+               flags |= FIEMAP_EXTENT_SHARED;
+
+       return ext4_fiemap_fill_next_extent(fi, iomap->offset,
+                       iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
+                       iomap->length, flags);
+}
+
+/* iomap_iter */
+static inline int iomap_iter_advance(struct iomap_iter *iter)
+{
+       /* handle the previous iteration (if any) */
+       if (iter->iomap.length) {
+               if (iter->processed <= 0)
+                       return iter->processed;
+               if (WARN_ON_ONCE(iter->processed > iomap_length(iter)))
+                       return -EIO;
+               iter->pos += iter->processed;
+               iter->len -= iter->processed;
+               if (!iter->len)
+                       return 0;
+       }
+
+       /* clear the state for the next iteration */
+       iter->processed = 0;
+       memset(&iter->iomap, 0, sizeof(iter->iomap));
+       memset(&iter->srcmap, 0, sizeof(iter->srcmap));
+       return 1;
+}
+
+static inline void iomap_iter_done(struct iomap_iter *iter)
+{
+       WARN_ON_ONCE(iter->iomap.offset > iter->pos);
+       WARN_ON_ONCE(iter->iomap.length == 0);
+       WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);
+       WARN_ON_ONCE(iter->srcmap.type != IOMAP_HOLE);
+}
+
+/**
+ * iomap_iter - iterate over a range in a file
+ * @iter: iteration structure
+ * @ops: iomap ops provided by the file system
+ *
+ * Iterate over filesystem-provided space mappings for the provided file range.
+ *
+ * This function handles cleanup of resources acquired for iteration when the
+ * filesystem indicates there are no more space mappings, which means that this
+ * function must be called in a loop that continues as long as it returns a
+ * positive value.  If 0 or a negative value is returned, the caller must not
+ * return to the loop body.  Within a loop body, there are two ways to break out
+ * of the loop body:  leave @iter.processed unchanged, or set it to a negative
+ * errno.
+ */
+int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops)
+{
+       int ret;
+
+       if (iter->iomap.length && ops->iomap_end) {
+               ret = ops->iomap_end(iter->inode, iter->pos, iomap_length(iter),
+                               iter->processed > 0 ? iter->processed : 0,
+                               iter->flags, &iter->iomap);
+               if (ret < 0 && !iter->processed)
+                       return ret;
+       }
+
+       ret = iomap_iter_advance(iter);
+       if (ret <= 0)
+               return ret;
+
+       ret = ops->iomap_begin(iter->inode, iter->pos, iter->len, iter->flags,
+                              &iter->iomap, &iter->srcmap);
+       if (ret < 0)
+               return ret;
+       iomap_iter_done(iter);
+       return 1;
+}
+
+static loff_t iomap_fiemap_iter(const struct iomap_iter *iter,
+               struct fiemap_extent_info *fi, struct iomap *prev)
+{
+       int ret;
+
+       if (iter->iomap.type == IOMAP_HOLE)
+               return iomap_length(iter);
+
+       ret = ext4_iomap_to_fiemap(fi, prev, 0);
+       *prev = iter->iomap;
+       switch (ret) {
+       case 0:         /* success */
+               return iomap_length(iter);
+       case 1:         /* extent array full */
+               return 0;
+       default:        /* error */
+               return ret;
+       }
+}
+
+static
+int _ext4_iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
+                       u64 start, u64 len, const struct iomap_ops *ops)
+{
+       struct iomap_iter iter = {
+               .inode          = inode,
+               .pos            = start,
+               .len            = len,
+               .flags          = IOMAP_REPORT,
+       };
+       struct iomap prev = {
+               .type           = IOMAP_HOLE,
+       };
+       int ret;
+       bool in_kernel = fi->fi_flags & EXT4_FIEMAP_FLAG_MEMCPY;
+
+       fi->fi_flags &= ~EXT4_FIEMAP_FLAG_MEMCPY;
+       ret = fiemap_prep(inode, fi, start, &iter.len, 0);
+       if (in_kernel)
+               fi->fi_flags |= EXT4_FIEMAP_FLAG_MEMCPY;
+       if (ret) {
+               return ret;
+       }
+
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = iomap_fiemap_iter(&iter, fi, &prev);
+
+       if (prev.type != IOMAP_HOLE) {
+               ret = ext4_iomap_to_fiemap(fi, &prev, FIEMAP_EXTENT_LAST);
+               if (ret < 0) {
+                       return ret;
+               }
+       }
+
+       /* inode with no (attribute) mapping will give ENOENT */
+       if (ret < 0 && ret != -ENOENT)
+               return ret;
+       return 0;
+}
+
+#define ext4_iomap_fiemap(i, f, s, l, ops) \
+       _ext4_iomap_fiemap((i), (f), (s), (l), (ops))
+#endif /* KERNEL_DS */
+
 static int ext4_fill_es_cache_info(struct inode *inode,
                                   ext4_lblk_t block, ext4_lblk_t num,
                                   struct fiemap_extent_info *fieinfo)
@@ -4959,11 +5191,10 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
        if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
                fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
-               return iomap_fiemap(inode, fieinfo, start, len,
-                                   &ext4_iomap_xattr_ops);
+               return ext4_iomap_fiemap(inode, fieinfo, start, len,
+                                        &ext4_iomap_xattr_ops);
        }
-
-       return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
+       return ext4_iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
 }
 
 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
-- 
2.34.1
