// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a run of contiguous blocks that can be
 * represented by a single extent
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

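/*
 * Flush the pending contiguous range tracked in @lb into the temporary
 * inode as a single extent. Extends or restarts the journal handle as
 * needed so that credits do not accumulate across loop iterations. A
 * zero first_pblock means there is nothing to insert.
 */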
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;

	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on the temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credits. Below we try not to accumulate too many
	 * of them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credits we have accumulated are not too high
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		up_write((&EXT4_I(inode)->i_data_sem));
		retval = ext4_journal_restart(handle, needed);
		down_write((&EXT4_I(inode)->i_data_sem));
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If we are not able to extend the journal, restart it
			 */
			up_write((&EXT4_I(inode)->i_data_sem));
			retval = ext4_journal_restart(handle, needed);
			down_write((&EXT4_I(inode)->i_data_sem));
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
	up_write((&EXT4_I(inode)->i_data_sem));
	ext4_ext_drop_refs(path);
	kfree(path);
	lb->first_pblock = 0;
	return retval;
}

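/*
 * Account one logical block mapped at physical block @pblock: either
 * grow the contiguous range currently tracked in @lb, or flush that
 * range via finish_range() and start a new one at @pblock.
 */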
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

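/*
 * Walk a single indirect block: feed each mapped entry to
 * update_extent_range() and step curr_block over holes so the logical
 * block numbering stays in sync.
 */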
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

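/*
 * Walk a double indirect block: recurse into each mapped indirect
 * block, skipping an entire indirect block's worth of logical blocks
 * for every hole.
 */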
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

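/*
 * Walk the triple indirect block: recurse into each mapped double
 * indirect block, skipping max_entries * max_entries logical blocks
 * for every hole.
 */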
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

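/*
 * Ensure the handle has enough credits to free one block; extend it,
 * or restart the transaction with the required credits if extending
 * fails.
 */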
static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing blocks. During this we touch the
	 * superblock, group descriptor and block bitmap,
	 * so we need a credit of 3. We may also update
	 * quota (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}

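/*
 * Free the indirect (metadata) blocks referenced from a double indirect
 * block, then free the double indirect block itself. The data blocks
 * they pointed to are not freed; those are now owned by the extent tree.
 */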
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

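/*
 * Free the metadata tree below a triple indirect block: each mapped
 * entry is a double indirect block handed to free_dind_blocks(), and
 * the triple indirect block itself is freed last.
 */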
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

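/*
 * Free the single, double and triple indirect metadata blocks whose
 * numbers were saved from the original inode's i_data[] before the
 * swap in ext4_ext_swap_inode_data().
 */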
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				 le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

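/*
 * Switch the original inode over to the extent tree built in tmp_inode:
 * copy the i_data array across under i_data_sem, fold tmp_inode's
 * i_blocks into the original inode, and free the now unused indirect
 * metadata blocks. Fails with -EAGAIN if a racing block allocation
 * cleared EXT4_STATE_EXT_MIGRATE in the meantime.
 */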
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32 i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE is cleared, a block allocation
	 * happened after we started the migrate. We need to
	 * fail the migrate.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty only after freeing, because freeing
	 * the indirect metadata blocks decrements i_blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

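/*
 * Recursively free the extent tree block referenced by @ix, including
 * any lower level index blocks it points to.
 */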
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = ext4_sb_bread(inode->i_sb, block, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent metadata blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;

	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent metadata
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

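/*
 * Convert an indirect-block-mapped inode to use extents: build the
 * extent tree in a temporary inode while walking the old block map,
 * then swap the new mapping into the original inode and free the old
 * metadata blocks. Typically reached via the EXT4_IOC_MIGRATE ioctl,
 * e.g. from userspace (sketch, error handling omitted):
 *
 *	int fd = open("/mnt/file", O_RDONLY);
 *	ioctl(fd, EXT4_IOC_MIGRATE);
 */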
int ext4_ext_migrate(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * Don't migrate fast symlinks
		 */
		return retval;

	percpu_down_write(&sbi->s_writepages_rwsem);

	/*
	 * Worst case we can touch the allocation bitmaps, a bgd
	 * block, and a block to link in the orphan list. We do need
	 * to worry about credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		goto out_unlock;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
				   S_IFREG, NULL, goal, owner, 0);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		goto out_unlock;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop the inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * Start with one credit accounted for
	 * the superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail the migrate. New block allocation will
	 * clear the EXT4_STATE_EXT_MIGRATE flag. The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just roll back the in-core changes and
		 * leave the rest of the work to orphan_list_cleanup().
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out_tmp_inode;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address, 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * On failure, delete the extent information of the
		 * tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap the inode data, free the
			 * extent details of the tmp inode
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as having size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * Set the i_blocks count to zero
	 * so that ext4_evict_inode() does the
	 * right job.
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out_tmp_inode:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);
out_unlock:
	percpu_up_write(&sbi->s_writepages_rwsem);
	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
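/*
 * Typically reached when the extents flag is cleared through
 * EXT4_IOC_SETFLAGS (sketch, error handling omitted):
 *
 *	int flags;
 *	ioctl(fd, EXT4_IOC_GETFLAGS, &flags);
 *	flags &= ~EXT4_EXTENTS_FL;
 *	ioctl(fd, EXT4_IOC_SETFLAGS, &flags);
 */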
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header *eh;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent *ex;
	unsigned int i, len;
	ext4_lblk_t start, end;
	ext4_fsblk_t blk;
	handle_t *handle;
	int ret;

	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (ext4_has_feature_bigalloc(inode->i_sb))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated; otherwise delayed-allocation blocks may
	 * not be reflected and would bypass the checks on the extent header.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	percpu_down_write(&sbi->s_writepages_rwsem);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_unlock;
	}

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
out_unlock:
	percpu_up_write(&sbi->s_writepages_rwsem);
	return ret;
}