// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a run of contiguous blocks that can be
 * represented by a single extent.
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

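/*
 * Add the range currently described by @lb (if any) to the temporary
 * inode as a single extent, making sure the handle has enough journal
 * credits first, and then reset @lb so that a new range can be started.
 */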
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;

	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on the temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent. Since
	 * we are doing this in a loop we may accumulate extra credits,
	 * but below we try not to accumulate too many of them by
	 * restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
	if (retval < 0)
		goto err_out;
	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
	up_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	lb->first_pblock = 0;
	return retval;
}

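/*
 * Account one block at logical offset lb->curr_block mapping to
 * physical block @pblock: either extend the range being accumulated
 * in @lb, or flush that range via finish_range() and start a new one.
 */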
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;

	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

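/*
 * Walk an indirect block at @pblock and feed every mapped block to
 * update_extent_range(); a hole only advances the logical block
 * counter.
 */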
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

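/*
 * Walk a double indirect block at @pblock, recursing into each
 * indirect block it references; a hole skips an indirect block's
 * worth of logical blocks.
 */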
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

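/*
 * Walk a triple indirect block at @pblock, recursing into each double
 * indirect block it references.
 */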
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

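/*
 * Free the indirect blocks listed in a double indirect block, then the
 * double indirect block itself. Only the old meta-data blocks are
 * freed here; the data blocks they used to map are now owned by the
 * extent tree.
 */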
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
	int err;

	bh = ext4_sb_bread(sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			err = ext4_journal_ensure_credits(handle,
				EXT4_RESERVE_TRANS_BLOCKS,
				ext4_free_metadata_revoke_credits(sb, 1));
			if (err < 0) {
				put_bh(bh);
				return err;
			}
			ext4_free_blocks(handle, inode, NULL,
					le32_to_cpu(tmp_idata[i]), 1,
					EXT4_FREE_BLOCKS_METADATA |
					EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	err = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
				ext4_free_metadata_revoke_credits(sb, 1));
	if (err < 0)
		return err;
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

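/*
 * Free a triple indirect block: tear down everything below each double
 * indirect block it references, then free the block itself.
 */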
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
	if (retval < 0)
		return retval;
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

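/*
 * Free the indirect, double indirect and triple indirect meta-data
 * blocks recorded in @i_data, the copy of the original inode's block
 * pointers saved before the i_data swap.
 */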
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		retval = ext4_journal_ensure_credits(handle,
			EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
		if (retval < 0)
			return retval;
		ext4_free_blocks(handle, inode, NULL,
				le32_to_cpu(i_data[0]), 1,
				EXT4_FREE_BLOCKS_METADATA |
				EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

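/*
 * Copy the extent tree built in @tmp_inode over to @inode, fold the
 * newly allocated meta-data blocks into i_blocks and free the old
 * indirect blocks. Fails with -EAGAIN if a racing block allocation
 * cleared EXT4_STATE_EXT_MIGRATE in the meantime.
 */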
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
				    struct inode *tmp_inode)
{
	int retval, retval2 = 0;
	__le32 i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_ensure_credits(handle, 1, 0);
	if (retval < 0)
		goto err_out;

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE is cleared, a block allocation
	 * happened after we started the migration. We need to
	 * fail the migration.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty afterwards, because we decrement the
	 * i_blocks when freeing the indirect meta-data blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	retval2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(retval2 && !retval))
		retval = retval2;

err_out:
	return retval;
}

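/*
 * Recursively free the extent tree blocks below the index entry @ix.
 */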
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = ext4_sb_bread(inode->i_sb, block, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
	if (retval < 0)
		return retval;
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

/*
 * Free the extent meta-data blocks only.
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;

	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

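/*
 * Convert an indirect-block-mapped inode to use extents. The extent
 * tree is built in a temporary inode first and only swapped into place
 * once it is complete, so the original block map stays intact if the
 * conversion fails part-way.
 */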
int ext4_ext_migrate(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	percpu_down_write(&sbi->s_writepages_rwsem);

	/*
	 * Worst case we can touch the allocation bitmaps and a block
	 * group descriptor block. We do need to worry about
	 * credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		goto out_unlock;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
				   S_IFREG, NULL, goal, owner, 0);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		goto out_unlock;
	}
	/*
	 * Use the correct seed for checksum (i.e. the seed from 'inode'). This
	 * is so that the metadata blocks will have the correct checksum after
	 * the migration.
	 */
	ei = EXT4_I(inode);
	EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed;
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);

479 /*
480 * start with one credit accounted for
481 * superblock modification.
482 *
483 * For the tmp_inode we already have committed the
484 * transaction that created the inode. Later as and
485 * when we add extents we extent the journal
486 */
487 /*
488 * Even though we take i_mutex we can still cause block
489 * allocation via mmap write to holes. If we have allocated
490 * new blocks we fail migrate. New block allocation will
491 * clear EXT4_STATE_EXT_MIGRATE flag. The flag is updated
492 * with i_data_sem held to prevent racing with block
493 * allocation.
494 */
495 down_read(&EXT4_I(inode)->i_data_sem);
496 ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
497 up_read((&EXT4_I(inode)->i_data_sem));
498
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		goto out_tmp_inode;
	}

	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32-bit block addresses, 4 bytes each */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * On failure, delete the extent information built in the
		 * tmp_inode.
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap the inode data, free the
			 * extent details of the tmp inode.
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	retval = ext4_journal_ensure_credits(handle, 1, 0);
	if (retval < 0)
		goto out_stop;
	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * Set the i_blocks count to zero
	 * so that ext4_evict_inode() does the
	 * right job.
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
out_stop:
	ext4_journal_stop(handle);
out_tmp_inode:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);
out_unlock:
	percpu_up_write(&sbi->s_writepages_rwsem);
	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array.
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header *eh;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent *ex;
	unsigned int i, len;
	ext4_lblk_t start, end;
	ext4_fsblk_t blk;
	handle_t *handle;
	int ret, ret2 = 0;

	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (ext4_has_feature_bigalloc(inode->i_sb))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated; otherwise delayed-allocated blocks might
	 * not be reflected in the extent header and would bypass the checks
	 * on it below.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	percpu_down_write(&sbi->s_writepages_rwsem);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_unlock;
	}

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ret2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(ret2 && !ret))
		ret = ret2;
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
out_unlock:
	percpu_up_write(&sbi->s_writepages_rwsem);
	return ret;
}