blob: 507f8f910327081aca198227fa50b88669f3db18 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001/*
2 * inode.c
3 *
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
6 *
7 * COPYRIGHT
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
12 *
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
16 *
17 * HISTORY
18 *
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map
23 * and udf_read_inode
24 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
25 * block boundaries (which is not actually allowed)
26 * 12/20/98 added support for strategy 4096
27 * 03/07/99 rewrote udf_block_map (again)
28 * New funcs, inode_bmap, udf_next_aext
29 * 04/19/99 Support for writing device EA's for major/minor #
30 */
31
32#include "udfdecl.h"
33#include <linux/mm.h>
34#include <linux/module.h>
35#include <linux/pagemap.h>
36#include <linux/writeback.h>
37#include <linux/slab.h>
38#include <linux/crc-itu-t.h>
39#include <linux/mpage.h>
40#include <linux/uio.h>
41#include <linux/bio.h>
42
43#include "udf_i.h"
44#include "udf_sb.h"
45
46#define EXTENT_MERGE_SIZE 5
47
David Brazdil0f672f62019-12-10 10:32:29 +000048#define FE_MAPPED_PERMS (FE_PERM_U_READ | FE_PERM_U_WRITE | FE_PERM_U_EXEC | \
49 FE_PERM_G_READ | FE_PERM_G_WRITE | FE_PERM_G_EXEC | \
50 FE_PERM_O_READ | FE_PERM_O_WRITE | FE_PERM_O_EXEC)
51
52#define FE_DELETE_PERMS (FE_PERM_U_DELETE | FE_PERM_G_DELETE | \
53 FE_PERM_O_DELETE)
54
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000055static umode_t udf_convert_permissions(struct fileEntry *);
56static int udf_update_inode(struct inode *, int);
57static int udf_sync_inode(struct inode *inode);
58static int udf_alloc_i_data(struct inode *inode, size_t size);
59static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
60static int8_t udf_insert_aext(struct inode *, struct extent_position,
61 struct kernel_lb_addr, uint32_t);
62static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
63 struct kernel_long_ad *, int *);
64static void udf_prealloc_extents(struct inode *, int, int,
65 struct kernel_long_ad *, int *);
66static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *);
67static void udf_update_extents(struct inode *, struct kernel_long_ad *, int,
68 int, struct extent_position *);
69static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
70
71static void __udf_clear_extent_cache(struct inode *inode)
72{
73 struct udf_inode_info *iinfo = UDF_I(inode);
74
75 if (iinfo->cached_extent.lstart != -1) {
76 brelse(iinfo->cached_extent.epos.bh);
77 iinfo->cached_extent.lstart = -1;
78 }
79}
80
/* Invalidate extent cache (locked wrapper around __udf_clear_extent_cache) */
static void udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	__udf_clear_extent_cache(inode);
	spin_unlock(&iinfo->i_extent_cache_lock);
}
90
91/* Return contents of extent cache */
92static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
93 loff_t *lbcount, struct extent_position *pos)
94{
95 struct udf_inode_info *iinfo = UDF_I(inode);
96 int ret = 0;
97
98 spin_lock(&iinfo->i_extent_cache_lock);
99 if ((iinfo->cached_extent.lstart <= bcount) &&
100 (iinfo->cached_extent.lstart != -1)) {
101 /* Cache hit */
102 *lbcount = iinfo->cached_extent.lstart;
103 memcpy(pos, &iinfo->cached_extent.epos,
104 sizeof(struct extent_position));
105 if (pos->bh)
106 get_bh(pos->bh);
107 ret = 1;
108 }
109 spin_unlock(&iinfo->i_extent_cache_lock);
110 return ret;
111}
112
/*
 * Add extent to extent cache.
 *
 * Records @pos (the lookup position) together with @estart, the logical
 * byte offset the cached extent starts at, replacing any previous entry.
 * The cache takes its own reference on pos->bh.
 */
static void udf_update_extent_cache(struct inode *inode, loff_t estart,
				    struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	/* Invalidate previously cached extent */
	__udf_clear_extent_cache(inode);
	if (pos->bh)
		get_bh(pos->bh);	/* cache keeps its own bh reference */
	memcpy(&iinfo->cached_extent.epos, pos, sizeof(*pos));
	iinfo->cached_extent.lstart = estart;
	/*
	 * Rewind the cached offset by one descriptor so it points at the
	 * extent itself rather than just past it (pos was advanced by the
	 * read that produced it).
	 */
	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		iinfo->cached_extent.epos.offset -= sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		iinfo->cached_extent.epos.offset -= sizeof(struct long_ad);
		break;
	}
	spin_unlock(&iinfo->i_extent_cache_lock);
}
136
/*
 * Final teardown of an inode leaving the icache.  For inodes with no
 * remaining links, truncate the data and free the on-disk inode; in all
 * cases release page cache, in-core allocation data and the extent cache.
 */
void udf_evict_inode(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int want_delete = 0;

	if (!is_bad_inode(inode)) {
		if (!inode->i_nlink) {
			/* Last link gone: truncate to 0 and sync the inode
			 * before we free it below. */
			want_delete = 1;
			udf_setsize(inode, 0);
			udf_update_inode(inode, IS_SYNC(inode));
		}
		/* Sanity check: warn (but tolerate) a mismatch between the
		 * inode size and the total extent length. */
		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
		    inode->i_size != iinfo->i_lenExtents) {
			udf_warn(inode->i_sb,
				 "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
				 inode->i_ino, inode->i_mode,
				 (unsigned long long)inode->i_size,
				 (unsigned long long)iinfo->i_lenExtents);
		}
	}
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	kfree(iinfo->i_ext.i_data);
	iinfo->i_ext.i_data = NULL;
	udf_clear_extent_cache(inode);
	if (want_delete) {
		udf_free_inode(inode);
	}
}
167
/*
 * Clean up after a failed/short write that may have instantiated blocks
 * beyond i_size: drop the page cache past EOF and trim any extents that
 * were allocated for the failed range.
 */
static void udf_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);
	loff_t isize = inode->i_size;

	if (to > isize) {
		truncate_pagecache(inode, isize);
		/* In-ICB files have no extents to trim */
		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			udf_truncate_extents(inode);
			up_write(&iinfo->i_data_sem);
		}
	}
}
184
/* Write back one dirty page, mapping blocks via udf_get_block() */
static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, udf_get_block, wbc);
}
189
/* Write back a range of dirty pages, mapping blocks via udf_get_block() */
static int udf_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, udf_get_block);
}
195
/* Read one page, mapping blocks via udf_get_block() */
static int udf_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, udf_get_block);
}
200
/* Readahead: read several pages in one go via udf_get_block() */
static int udf_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, udf_get_block);
}
206
207static int udf_write_begin(struct file *file, struct address_space *mapping,
208 loff_t pos, unsigned len, unsigned flags,
209 struct page **pagep, void **fsdata)
210{
211 int ret;
212
213 ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
214 if (unlikely(ret))
215 udf_write_failed(mapping, pos + len);
216 return ret;
217}
218
219static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
220{
221 struct file *file = iocb->ki_filp;
222 struct address_space *mapping = file->f_mapping;
223 struct inode *inode = mapping->host;
224 size_t count = iov_iter_count(iter);
225 ssize_t ret;
226
227 ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
228 if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
229 udf_write_failed(mapping, iocb->ki_pos + count);
230 return ret;
231}
232
/* Map a file block to a device block via generic_block_bmap() */
static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, udf_get_block);
}
237
/* Address space operations for files whose data lives in allocated blocks
 * (as opposed to data embedded in the ICB). */
const struct address_space_operations udf_aops = {
	.readpage	= udf_readpage,
	.readpages	= udf_readpages,
	.writepage	= udf_writepage,
	.writepages	= udf_writepages,
	.write_begin	= udf_write_begin,
	.write_end	= generic_write_end,
	.direct_IO	= udf_direct_IO,
	.bmap		= udf_bmap,
};
248
/*
 * Expand file stored in ICB to a normal one-block-file
 *
 * This function requires i_data_sem for writing and releases it.
 * This function requires i_mutex held
 *
 * Returns 0 on success, -ENOMEM if the page cannot be obtained, or the
 * writepage error after restoring the in-ICB data.
 */
int udf_expand_file_adinicb(struct inode *inode)
{
	struct page *page;
	char *kaddr;
	struct udf_inode_info *iinfo = UDF_I(inode);
	int err;
	struct writeback_control udf_wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
	};

	WARN_ON_ONCE(!inode_is_locked(inode));
	if (!iinfo->i_lenAlloc) {
		/* No embedded data: just flip the allocation type */
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
		else
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
		/* from now on we have normal address_space methods */
		inode->i_data.a_ops = &udf_aops;
		up_write(&iinfo->i_data_sem);
		mark_inode_dirty(inode);
		return 0;
	}
	/*
	 * Release i_data_sem so that we can lock a page - page lock ranks
	 * above i_data_sem. i_mutex still protects us against file changes.
	 */
	up_write(&iinfo->i_data_sem);

	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* Copy the embedded data into page 0, zero-filling the tail */
		kaddr = kmap_atomic(page);
		memset(kaddr + iinfo->i_lenAlloc, 0x00,
		       PAGE_SIZE - iinfo->i_lenAlloc);
		memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
		       iinfo->i_lenAlloc);
		flush_dcache_page(page);
		SetPageUptodate(page);
		kunmap_atomic(kaddr);
	}
	down_write(&iinfo->i_data_sem);
	/* Wipe the in-ICB copy and switch to extent-based allocation */
	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;
	up_write(&iinfo->i_data_sem);
	err = inode->i_data.a_ops->writepage(page, &udf_wbc);
	if (err) {
		/* Restore everything back so that we don't lose data... */
		lock_page(page);
		down_write(&iinfo->i_data_sem);
		kaddr = kmap_atomic(page);
		memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
		       inode->i_size);
		kunmap_atomic(kaddr);
		unlock_page(page);
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		inode->i_data.a_ops = &udf_adinicb_aops;
		up_write(&iinfo->i_data_sem);
	}
	put_page(page);
	mark_inode_dirty(inode);

	return err;
}
328
/*
 * Expand a directory stored in the ICB into a real one-block directory.
 *
 * Allocates a new block, copies every file identifier descriptor from the
 * ICB into it, clears the in-ICB data and records the new block as the
 * directory's single extent.  On success *block holds the new logical
 * block number and the (dirty) buffer_head is returned; on failure NULL
 * is returned with *err set by the failing allocation.
 */
struct buffer_head *udf_expand_dir_adinicb(struct inode *inode,
					   udf_pblk_t *block, int *err)
{
	udf_pblk_t newblock;
	struct buffer_head *dbh = NULL;
	struct kernel_lb_addr eloc;
	uint8_t alloctype;
	struct extent_position epos;

	struct udf_fileident_bh sfibh, dfibh;
	loff_t f_pos = udf_ext0_offset(inode);
	int size = udf_ext0_offset(inode) + inode->i_size;
	struct fileIdentDesc cfi, *sfi, *dfi;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		alloctype = ICBTAG_FLAG_AD_SHORT;
	else
		alloctype = ICBTAG_FLAG_AD_LONG;

	if (!inode->i_size) {
		/* Empty directory: just switch the allocation type */
		iinfo->i_alloc_type = alloctype;
		mark_inode_dirty(inode);
		return NULL;
	}

	/* alloc block, and copy data to it */
	*block = udf_new_block(inode->i_sb, inode,
			       iinfo->i_location.partitionReferenceNum,
			       iinfo->i_location.logicalBlockNum, err);
	if (!(*block))
		return NULL;
	newblock = udf_get_pblock(inode->i_sb, *block,
				  iinfo->i_location.partitionReferenceNum,
				  0);
	if (!newblock)
		return NULL;
	dbh = udf_tgetblk(inode->i_sb, newblock);
	if (!dbh)
		return NULL;
	lock_buffer(dbh);
	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
	set_buffer_uptodate(dbh);
	unlock_buffer(dbh);
	mark_buffer_dirty_inode(dbh, inode);

	/* Walk the directory entries in the ICB and rewrite them into the
	 * new block.  i_alloc_type is toggled around udf_fileident_read()
	 * because it reads via the in-ICB path. */
	sfibh.soffset = sfibh.eoffset =
			f_pos & (inode->i_sb->s_blocksize - 1);
	sfibh.sbh = sfibh.ebh = NULL;
	dfibh.soffset = dfibh.eoffset = 0;
	dfibh.sbh = dfibh.ebh = dbh;
	while (f_pos < size) {
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
					 NULL, NULL, NULL);
		if (!sfi) {
			brelse(dbh);
			return NULL;
		}
		iinfo->i_alloc_type = alloctype;
		sfi->descTag.tagLocation = cpu_to_le32(*block);
		dfibh.soffset = dfibh.eoffset;
		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
				 sfi->fileIdent +
					le16_to_cpu(sfi->lengthOfImpUse))) {
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
			brelse(dbh);
			return NULL;
		}
	}
	mark_buffer_dirty_inode(dbh, inode);

	/* Clear the in-ICB area and record the new block as the sole extent */
	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	eloc.logicalBlockNum = *block;
	eloc.partitionReferenceNum =
				iinfo->i_location.partitionReferenceNum;
	iinfo->i_lenExtents = inode->i_size;
	epos.bh = NULL;
	epos.block = iinfo->i_location;
	epos.offset = udf_file_entry_alloc_offset(inode);
	udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
	/* UniqueID stuff */

	brelse(epos.bh);
	mark_inode_dirty(inode);
	return dbh;
}
420
/*
 * get_block callback: map file block @block to a device block in
 * @bh_result.  Without @create this is a read-only lookup; with @create
 * the block is allocated if needed (under i_data_sem).
 */
static int udf_get_block(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	int err, new;
	sector_t phys = 0;
	struct udf_inode_info *iinfo;

	if (!create) {
		/* Lookup only: an unmapped hole returns success with
		 * bh_result left unmapped. */
		phys = udf_block_map(inode, block);
		if (phys)
			map_bh(bh_result, inode->i_sb, phys);
		return 0;
	}

	err = -EIO;
	new = 0;
	iinfo = UDF_I(inode);

	down_write(&iinfo->i_data_sem);
	/* Sequential allocation: advance the goal block heuristic */
	if (block == iinfo->i_next_alloc_block + 1) {
		iinfo->i_next_alloc_block++;
		iinfo->i_next_alloc_goal++;
	}

	udf_clear_extent_cache(inode);
	phys = inode_getblk(inode, block, &err, &new);
	if (!phys)
		goto abort;

	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, phys);

abort:
	up_write(&iinfo->i_data_sem);
	return err;
}
458
459static struct buffer_head *udf_getblk(struct inode *inode, udf_pblk_t block,
460 int create, int *err)
461{
462 struct buffer_head *bh;
463 struct buffer_head dummy;
464
465 dummy.b_state = 0;
466 dummy.b_blocknr = -1000;
467 *err = udf_get_block(inode, block, &dummy, create);
468 if (!*err && buffer_mapped(&dummy)) {
469 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
470 if (buffer_new(&dummy)) {
471 lock_buffer(bh);
472 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
473 set_buffer_uptodate(bh);
474 unlock_buffer(bh);
475 mark_buffer_dirty_inode(bh, inode);
476 }
477 return bh;
478 }
479
480 return NULL;
481}
482
David Brazdil0f672f62019-12-10 10:32:29 +0000483/* Extend the file with new blocks totaling 'new_block_bytes',
484 * return the number of extents added
485 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000486static int udf_do_extend_file(struct inode *inode,
487 struct extent_position *last_pos,
488 struct kernel_long_ad *last_ext,
David Brazdil0f672f62019-12-10 10:32:29 +0000489 loff_t new_block_bytes)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000490{
David Brazdil0f672f62019-12-10 10:32:29 +0000491 uint32_t add;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000492 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
493 struct super_block *sb = inode->i_sb;
494 struct kernel_lb_addr prealloc_loc = {};
495 uint32_t prealloc_len = 0;
496 struct udf_inode_info *iinfo;
497 int err;
498
499 /* The previous extent is fake and we should not extend by anything
500 * - there's nothing to do... */
David Brazdil0f672f62019-12-10 10:32:29 +0000501 if (!new_block_bytes && fake)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000502 return 0;
503
504 iinfo = UDF_I(inode);
505 /* Round the last extent up to a multiple of block size */
506 if (last_ext->extLength & (sb->s_blocksize - 1)) {
507 last_ext->extLength =
508 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
509 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
510 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
511 iinfo->i_lenExtents =
512 (iinfo->i_lenExtents + sb->s_blocksize - 1) &
513 ~(sb->s_blocksize - 1);
514 }
515
516 /* Last extent are just preallocated blocks? */
517 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
518 EXT_NOT_RECORDED_ALLOCATED) {
519 /* Save the extent so that we can reattach it to the end */
520 prealloc_loc = last_ext->extLocation;
521 prealloc_len = last_ext->extLength;
522 /* Mark the extent as a hole */
523 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
524 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
525 last_ext->extLocation.logicalBlockNum = 0;
526 last_ext->extLocation.partitionReferenceNum = 0;
527 }
528
529 /* Can we merge with the previous extent? */
530 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
531 EXT_NOT_RECORDED_NOT_ALLOCATED) {
David Brazdil0f672f62019-12-10 10:32:29 +0000532 add = (1 << 30) - sb->s_blocksize -
533 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
534 if (add > new_block_bytes)
535 add = new_block_bytes;
536 new_block_bytes -= add;
537 last_ext->extLength += add;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000538 }
539
540 if (fake) {
541 udf_add_aext(inode, last_pos, &last_ext->extLocation,
542 last_ext->extLength, 1);
543 count++;
544 } else {
545 struct kernel_lb_addr tmploc;
546 uint32_t tmplen;
547
548 udf_write_aext(inode, last_pos, &last_ext->extLocation,
549 last_ext->extLength, 1);
Olivier Deprez0e641232021-09-23 10:07:05 +0200550
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000551 /*
Olivier Deprez0e641232021-09-23 10:07:05 +0200552 * We've rewritten the last extent. If we are going to add
553 * more extents, we may need to enter possible following
554 * empty indirect extent.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000555 */
Olivier Deprez0e641232021-09-23 10:07:05 +0200556 if (new_block_bytes || prealloc_len)
557 udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000558 }
559
560 /* Managed to do everything necessary? */
David Brazdil0f672f62019-12-10 10:32:29 +0000561 if (!new_block_bytes)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000562 goto out;
563
564 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
565 last_ext->extLocation.logicalBlockNum = 0;
566 last_ext->extLocation.partitionReferenceNum = 0;
David Brazdil0f672f62019-12-10 10:32:29 +0000567 add = (1 << 30) - sb->s_blocksize;
568 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000569
570 /* Create enough extents to cover the whole hole */
David Brazdil0f672f62019-12-10 10:32:29 +0000571 while (new_block_bytes > add) {
572 new_block_bytes -= add;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000573 err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
574 last_ext->extLength, 1);
575 if (err)
576 return err;
577 count++;
578 }
David Brazdil0f672f62019-12-10 10:32:29 +0000579 if (new_block_bytes) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000580 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
David Brazdil0f672f62019-12-10 10:32:29 +0000581 new_block_bytes;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000582 err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
583 last_ext->extLength, 1);
584 if (err)
585 return err;
586 count++;
587 }
588
589out:
590 /* Do we have some preallocated blocks saved? */
591 if (prealloc_len) {
592 err = udf_add_aext(inode, last_pos, &prealloc_loc,
593 prealloc_len, 1);
594 if (err)
595 return err;
596 last_ext->extLocation = prealloc_loc;
597 last_ext->extLength = prealloc_len;
598 count++;
599 }
600
601 /* last_pos should point to the last written extent... */
602 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
603 last_pos->offset -= sizeof(struct short_ad);
604 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
605 last_pos->offset -= sizeof(struct long_ad);
606 else
607 return -EIO;
608
609 return count;
610}
611
David Brazdil0f672f62019-12-10 10:32:29 +0000612/* Extend the final block of the file to final_block_len bytes */
613static void udf_do_extend_final_block(struct inode *inode,
614 struct extent_position *last_pos,
615 struct kernel_long_ad *last_ext,
616 uint32_t final_block_len)
617{
618 struct super_block *sb = inode->i_sb;
619 uint32_t added_bytes;
620
621 added_bytes = final_block_len -
622 (last_ext->extLength & (sb->s_blocksize - 1));
623 last_ext->extLength += added_bytes;
624 UDF_I(inode)->i_lenExtents += added_bytes;
625
626 udf_write_aext(inode, last_pos, &last_ext->extLocation,
627 last_ext->extLength, 1);
628}
629
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000630static int udf_extend_file(struct inode *inode, loff_t newsize)
631{
632
633 struct extent_position epos;
634 struct kernel_lb_addr eloc;
635 uint32_t elen;
636 int8_t etype;
637 struct super_block *sb = inode->i_sb;
638 sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
David Brazdil0f672f62019-12-10 10:32:29 +0000639 unsigned long partial_final_block;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000640 int adsize;
641 struct udf_inode_info *iinfo = UDF_I(inode);
642 struct kernel_long_ad extent;
David Brazdil0f672f62019-12-10 10:32:29 +0000643 int err = 0;
644 int within_final_block;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000645
646 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
647 adsize = sizeof(struct short_ad);
648 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
649 adsize = sizeof(struct long_ad);
650 else
651 BUG();
652
653 etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
David Brazdil0f672f62019-12-10 10:32:29 +0000654 within_final_block = (etype != -1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000655
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000656 if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
657 (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
658 /* File has no extents at all or has empty last
659 * indirect extent! Create a fake extent... */
660 extent.extLocation.logicalBlockNum = 0;
661 extent.extLocation.partitionReferenceNum = 0;
662 extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
663 } else {
664 epos.offset -= adsize;
665 etype = udf_next_aext(inode, &epos, &extent.extLocation,
666 &extent.extLength, 0);
667 extent.extLength |= etype << 30;
668 }
David Brazdil0f672f62019-12-10 10:32:29 +0000669
670 partial_final_block = newsize & (sb->s_blocksize - 1);
671
672 /* File has extent covering the new size (could happen when extending
673 * inside a block)?
674 */
675 if (within_final_block) {
676 /* Extending file within the last file block */
677 udf_do_extend_final_block(inode, &epos, &extent,
678 partial_final_block);
679 } else {
680 loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
681 partial_final_block;
682 err = udf_do_extend_file(inode, &epos, &extent, add);
683 }
684
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000685 if (err < 0)
686 goto out;
687 err = 0;
688 iinfo->i_lenExtents = newsize;
689out:
690 brelse(epos.bh);
691 return err;
692}
693
/*
 * Core block mapping/allocation for udf_get_block() (create case).
 *
 * Walks the extent list to find the extent containing @block, allocating
 * a new block (and any hole extents up to it) when needed.  Up to
 * EXTENT_MERGE_SIZE neighbouring extents are gathered in laarr[], split
 * around the target block, optionally preallocated behind, merged and
 * written back.  Returns the physical block (0 on failure, with *err
 * set) and sets *new when a block was freshly allocated.
 * Caller holds i_data_sem for writing.
 */
static sector_t inode_getblk(struct inode *inode, sector_t block,
			     int *err, int *new)
{
	struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
	struct extent_position prev_epos, cur_epos, next_epos;
	int count = 0, startnum = 0, endnum = 0;
	uint32_t elen = 0, tmpelen;
	struct kernel_lb_addr eloc, tmpeloc;
	int c = 1;
	loff_t lbcount = 0, b_off = 0;
	udf_pblk_t newblocknum, newblock;
	sector_t offset = 0;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(inode);
	udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
	int lastblock = 0;
	bool isBeyondEOF;

	*err = 0;
	*new = 0;
	prev_epos.offset = udf_file_entry_alloc_offset(inode);
	prev_epos.block = iinfo->i_location;
	prev_epos.bh = NULL;
	cur_epos = next_epos = prev_epos;
	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;

	/* find the extent which contains the block we are looking for.
	   alternate between laarr[0] and laarr[1] for locations of the
	   current extent, and the previous extent */
	do {
		if (prev_epos.bh != cur_epos.bh) {
			brelse(prev_epos.bh);
			get_bh(cur_epos.bh);
			prev_epos.bh = cur_epos.bh;
		}
		if (cur_epos.bh != next_epos.bh) {
			brelse(cur_epos.bh);
			get_bh(next_epos.bh);
			cur_epos.bh = next_epos.bh;
		}

		lbcount += elen;

		prev_epos.block = cur_epos.block;
		cur_epos.block = next_epos.block;

		prev_epos.offset = cur_epos.offset;
		cur_epos.offset = next_epos.offset;

		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
		if (etype == -1)
			break;

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		/* Remember the end of the last real extent as allocation
		 * goal for a possible new block */
		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				 inode->i_sb->s_blocksize_bits);

		count++;
	} while (lbcount + elen <= b_off);

	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;
	/*
	 * Move prev_epos and cur_epos into indirect extent if we are at
	 * the pointer to it
	 */
	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);

	/* if the extent is allocated and recorded, return the block
	   if the extent is not a multiple of the blocksize, round up */

	if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
		if (elen & (inode->i_sb->s_blocksize - 1)) {
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				 ~(inode->i_sb->s_blocksize - 1));
			udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
		}
		newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
		goto out_free;
	}

	/* Are we beyond EOF? */
	if (etype == -1) {
		int ret;
		loff_t hole_len;
		isBeyondEOF = true;
		if (count) {
			if (c)
				laarr[0] = laarr[1];
			startnum = 1;
		} else {
			/* Create a fake extent when there's not one */
			memset(&laarr[0].extLocation, 0x00,
			       sizeof(struct kernel_lb_addr));
			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
			/* Will udf_do_extend_file() create real extent from
			   a fake one? */
			startnum = (offset > 0);
		}
		/* Create extents for the hole between EOF and offset */
		hole_len = (loff_t)offset << inode->i_blkbits;
		ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
		if (ret < 0) {
			*err = ret;
			newblock = 0;
			goto out_free;
		}
		c = 0;
		offset = 0;
		count += ret;
		/* We are not covered by a preallocated extent? */
		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
						EXT_NOT_RECORDED_ALLOCATED) {
			/* Is there any real extent? - otherwise we overwrite
			 * the fake one... */
			if (count)
				c = !c;
			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
				inode->i_sb->s_blocksize;
			memset(&laarr[c].extLocation, 0x00,
			       sizeof(struct kernel_lb_addr));
			count++;
		}
		endnum = c + 1;
		lastblock = 1;
	} else {
		isBeyondEOF = false;
		endnum = startnum = ((count > 2) ? 2 : count);

		/* if the current extent is in position 0,
		   swap it with the previous */
		if (!c && count != 1) {
			laarr[2] = laarr[0];
			laarr[0] = laarr[1];
			laarr[1] = laarr[2];
			c = 1;
		}

		/* if the current block is located in an extent,
		   read the next extent */
		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
		if (etype != -1) {
			laarr[c + 1].extLength = (etype << 30) | elen;
			laarr[c + 1].extLocation = eloc;
			count++;
			startnum++;
			endnum++;
		} else
			lastblock = 1;
	}

	/* if the current extent is not recorded but allocated, get the
	 * block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	else { /* otherwise, allocate a new block */
		if (iinfo->i_next_alloc_block == block)
			goal = iinfo->i_next_alloc_goal;

		if (!goal) {
			if (!(goal = pgoal)) /* XXX: what was intended here? */
				goal = iinfo->i_location.logicalBlockNum + 1;
		}

		newblocknum = udf_new_block(inode->i_sb, inode,
				iinfo->i_location.partitionReferenceNum,
				goal, err);
		if (!newblocknum) {
			*err = -ENOSPC;
			newblock = 0;
			goto out_free;
		}
		if (isBeyondEOF)
			iinfo->i_lenExtents += inode->i_sb->s_blocksize;
	}

	/* if the extent the requsted block is located in contains multiple
	 * blocks, split the extent into at most three extents. blocks prior
	 * to requested block, requested block, and blocks after requested
	 * block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

	/* We preallocate blocks only for regular files. It also makes sense
	 * for directories but there's a problem when to drop the
	 * preallocation. We might use some delayed work for that but I feel
	 * it's overengineering for a filesystem like UDF. */
	if (S_ISREG(inode->i_mode))
		udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	 * of extents is greater than the old number, and deleting extents if
	 * the new number of extents is less than the old number */
	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);

	newblock = udf_get_pblock(inode->i_sb, newblocknum,
				  iinfo->i_location.partitionReferenceNum, 0);
	if (!newblock) {
		*err = -EIO;
		goto out_free;
	}
	*new = 1;
	iinfo->i_next_alloc_block = block;
	iinfo->i_next_alloc_goal = newblocknum;
	inode->i_ctime = current_time(inode);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
out_free:
	brelse(prev_epos.bh);
	brelse(cur_epos.bh);
	brelse(next_epos.bh);
	return newblock;
}
920
/*
 * Split laarr[*c] (a not-recorded extent containing the target block at
 * @offset) into up to three extents: blocks before the target, the
 * target block itself (recorded, at @newblocknum), and blocks after it.
 * *c and *endnum are advanced to track the inserted entries.
 */
static void udf_split_extents(struct inode *inode, int *c, int offset,
			      udf_pblk_t newblocknum,
			      struct kernel_long_ad *laarr, int *endnum)
{
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
	    (laarr[*c].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
		int curr = *c;
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			    blocksize - 1) >> blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		/* Shift following entries to make room for the split parts */
		if (blen == 1)
			;
		else if (!offset || blen == offset + 1) {
			laarr[curr + 2] = laarr[curr + 1];
			laarr[curr + 1] = laarr[curr];
		} else {
			laarr[curr + 3] = laarr[curr + 1];
			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
		}

		if (offset) {
			/* Head part: blocks before the target block */
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
				/* Allocated-but-unrecorded head blocks are
				 * freed and turned into a hole */
				udf_free_blocks(inode->i_sb, inode,
						&laarr[curr].extLocation,
						0, offset);
				laarr[curr].extLength =
					EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.
						partitionReferenceNum = 0;
			} else
				laarr[curr].extLength = (etype << 30) |
					(offset << blocksize_bits);
			curr++;
			(*c)++;
			(*endnum)++;
		}

		/* The target block becomes a recorded one-block extent */
		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I(inode)->i_location.partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			blocksize;
		curr++;

		/* Tail part: blocks after the target block, if any */
		if (blen != offset + 1) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum +=
								offset + 1;
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << blocksize_bits);
			curr++;
			(*endnum)++;
		}
	}
}
984
985static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
986 struct kernel_long_ad *laarr,
987 int *endnum)
988{
989 int start, length = 0, currlength = 0, i;
990
991 if (*endnum >= (c + 1)) {
992 if (!lastblock)
993 return;
994 else
995 start = c;
996 } else {
997 if ((laarr[c + 1].extLength >> 30) ==
998 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
999 start = c + 1;
1000 length = currlength =
1001 (((laarr[c + 1].extLength &
1002 UDF_EXTENT_LENGTH_MASK) +
1003 inode->i_sb->s_blocksize - 1) >>
1004 inode->i_sb->s_blocksize_bits);
1005 } else
1006 start = c;
1007 }
1008
1009 for (i = start + 1; i <= *endnum; i++) {
1010 if (i == *endnum) {
1011 if (lastblock)
1012 length += UDF_DEFAULT_PREALLOC_BLOCKS;
1013 } else if ((laarr[i].extLength >> 30) ==
1014 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
1015 length += (((laarr[i].extLength &
1016 UDF_EXTENT_LENGTH_MASK) +
1017 inode->i_sb->s_blocksize - 1) >>
1018 inode->i_sb->s_blocksize_bits);
1019 } else
1020 break;
1021 }
1022
1023 if (length) {
1024 int next = laarr[start].extLocation.logicalBlockNum +
1025 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
1026 inode->i_sb->s_blocksize - 1) >>
1027 inode->i_sb->s_blocksize_bits);
1028 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
1029 laarr[start].extLocation.partitionReferenceNum,
1030 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
1031 length : UDF_DEFAULT_PREALLOC_BLOCKS) -
1032 currlength);
1033 if (numalloc) {
1034 if (start == (c + 1))
1035 laarr[start].extLength +=
1036 (numalloc <<
1037 inode->i_sb->s_blocksize_bits);
1038 else {
1039 memmove(&laarr[c + 2], &laarr[c + 1],
1040 sizeof(struct long_ad) * (*endnum - (c + 1)));
1041 (*endnum)++;
1042 laarr[c + 1].extLocation.logicalBlockNum = next;
1043 laarr[c + 1].extLocation.partitionReferenceNum =
1044 laarr[c].extLocation.
1045 partitionReferenceNum;
1046 laarr[c + 1].extLength =
1047 EXT_NOT_RECORDED_ALLOCATED |
1048 (numalloc <<
1049 inode->i_sb->s_blocksize_bits);
1050 start = c + 1;
1051 }
1052
1053 for (i = start + 1; numalloc && i < *endnum; i++) {
1054 int elen = ((laarr[i].extLength &
1055 UDF_EXTENT_LENGTH_MASK) +
1056 inode->i_sb->s_blocksize - 1) >>
1057 inode->i_sb->s_blocksize_bits;
1058
1059 if (elen > numalloc) {
1060 laarr[i].extLength -=
1061 (numalloc <<
1062 inode->i_sb->s_blocksize_bits);
1063 numalloc = 0;
1064 } else {
1065 numalloc -= elen;
1066 if (*endnum > (i + 1))
1067 memmove(&laarr[i],
1068 &laarr[i + 1],
1069 sizeof(struct long_ad) *
1070 (*endnum - (i + 1)));
1071 i--;
1072 (*endnum)--;
1073 }
1074 }
1075 UDF_I(inode)->i_lenExtents +=
1076 numalloc << inode->i_sb->s_blocksize_bits;
1077 }
1078 }
1079}
1080
1081static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
1082 int *endnum)
1083{
1084 int i;
1085 unsigned long blocksize = inode->i_sb->s_blocksize;
1086 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1087
1088 for (i = 0; i < (*endnum - 1); i++) {
1089 struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
1090 struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
1091
1092 if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
1093 (((li->extLength >> 30) ==
1094 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
1095 ((lip1->extLocation.logicalBlockNum -
1096 li->extLocation.logicalBlockNum) ==
1097 (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1098 blocksize - 1) >> blocksize_bits)))) {
1099
1100 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1101 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
1102 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
1103 lip1->extLength = (lip1->extLength -
1104 (li->extLength &
1105 UDF_EXTENT_LENGTH_MASK) +
1106 UDF_EXTENT_LENGTH_MASK) &
1107 ~(blocksize - 1);
1108 li->extLength = (li->extLength &
1109 UDF_EXTENT_FLAG_MASK) +
1110 (UDF_EXTENT_LENGTH_MASK + 1) -
1111 blocksize;
1112 lip1->extLocation.logicalBlockNum =
1113 li->extLocation.logicalBlockNum +
1114 ((li->extLength &
1115 UDF_EXTENT_LENGTH_MASK) >>
1116 blocksize_bits);
1117 } else {
1118 li->extLength = lip1->extLength +
1119 (((li->extLength &
1120 UDF_EXTENT_LENGTH_MASK) +
1121 blocksize - 1) & ~(blocksize - 1));
1122 if (*endnum > (i + 2))
1123 memmove(&laarr[i + 1], &laarr[i + 2],
1124 sizeof(struct long_ad) *
1125 (*endnum - (i + 2)));
1126 i--;
1127 (*endnum)--;
1128 }
1129 } else if (((li->extLength >> 30) ==
1130 (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
1131 ((lip1->extLength >> 30) ==
1132 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
1133 udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
1134 ((li->extLength &
1135 UDF_EXTENT_LENGTH_MASK) +
1136 blocksize - 1) >> blocksize_bits);
1137 li->extLocation.logicalBlockNum = 0;
1138 li->extLocation.partitionReferenceNum = 0;
1139
1140 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1141 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
1142 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
1143 lip1->extLength = (lip1->extLength -
1144 (li->extLength &
1145 UDF_EXTENT_LENGTH_MASK) +
1146 UDF_EXTENT_LENGTH_MASK) &
1147 ~(blocksize - 1);
1148 li->extLength = (li->extLength &
1149 UDF_EXTENT_FLAG_MASK) +
1150 (UDF_EXTENT_LENGTH_MASK + 1) -
1151 blocksize;
1152 } else {
1153 li->extLength = lip1->extLength +
1154 (((li->extLength &
1155 UDF_EXTENT_LENGTH_MASK) +
1156 blocksize - 1) & ~(blocksize - 1));
1157 if (*endnum > (i + 2))
1158 memmove(&laarr[i + 1], &laarr[i + 2],
1159 sizeof(struct long_ad) *
1160 (*endnum - (i + 2)));
1161 i--;
1162 (*endnum)--;
1163 }
1164 } else if ((li->extLength >> 30) ==
1165 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
1166 udf_free_blocks(inode->i_sb, inode,
1167 &li->extLocation, 0,
1168 ((li->extLength &
1169 UDF_EXTENT_LENGTH_MASK) +
1170 blocksize - 1) >> blocksize_bits);
1171 li->extLocation.logicalBlockNum = 0;
1172 li->extLocation.partitionReferenceNum = 0;
1173 li->extLength = (li->extLength &
1174 UDF_EXTENT_LENGTH_MASK) |
1175 EXT_NOT_RECORDED_NOT_ALLOCATED;
1176 }
1177 }
1178}
1179
1180static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
1181 int startnum, int endnum,
1182 struct extent_position *epos)
1183{
1184 int start = 0, i;
1185 struct kernel_lb_addr tmploc;
1186 uint32_t tmplen;
1187
1188 if (startnum > endnum) {
1189 for (i = 0; i < (startnum - endnum); i++)
1190 udf_delete_aext(inode, *epos);
1191 } else if (startnum < endnum) {
1192 for (i = 0; i < (endnum - startnum); i++) {
1193 udf_insert_aext(inode, *epos, laarr[i].extLocation,
1194 laarr[i].extLength);
1195 udf_next_aext(inode, epos, &laarr[i].extLocation,
1196 &laarr[i].extLength, 1);
1197 start++;
1198 }
1199 }
1200
1201 for (i = start; i < endnum; i++) {
1202 udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
1203 udf_write_aext(inode, epos, &laarr[i].extLocation,
1204 laarr[i].extLength, 1);
1205 }
1206}
1207
1208struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
1209 int create, int *err)
1210{
1211 struct buffer_head *bh = NULL;
1212
1213 bh = udf_getblk(inode, block, create, err);
1214 if (!bh)
1215 return NULL;
1216
1217 if (buffer_uptodate(bh))
1218 return bh;
1219
1220 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
1221
1222 wait_on_buffer(bh);
1223 if (buffer_uptodate(bh))
1224 return bh;
1225
1226 brelse(bh);
1227 *err = -EIO;
1228 return NULL;
1229}
1230
/*
 * Change the size of @inode to @newsize, extending or truncating the
 * allocation as needed.  Handles the in-ICB (inline) case separately: an
 * inline file that grows beyond the ICB is first expanded to extent form,
 * an inline file that shrinks just has its tail cleared in place.
 * Returns 0 or a negative errno.
 */
int udf_setsize(struct inode *inode, loff_t newsize)
{
	int err;
	struct udf_inode_info *iinfo;
	unsigned int bsize = i_blocksize(inode);

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	iinfo = UDF_I(inode);
	if (newsize > inode->i_size) {
		down_write(&iinfo->i_data_sem);
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			if (bsize <
			    (udf_file_entry_alloc_offset(inode) + newsize)) {
				/*
				 * NOTE(review): no up_write() on this error
				 * path - presumably udf_expand_file_adinicb()
				 * drops i_data_sem itself (we re-take it on
				 * success below); confirm against its
				 * definition.
				 */
				err = udf_expand_file_adinicb(inode);
				if (err)
					return err;
				down_write(&iinfo->i_data_sem);
			} else {
				/* Still fits inline: just grow the inline area */
				iinfo->i_lenAlloc = newsize;
				goto set_size;
			}
		}
		err = udf_extend_file(inode, newsize);
		if (err) {
			up_write(&iinfo->i_data_sem);
			return err;
		}
set_size:
		up_write(&iinfo->i_data_sem);
		truncate_setsize(inode, newsize);
	} else {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			/* Zero the now-unused tail of the inline data area */
			memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
			       0x00, bsize - newsize -
			       udf_file_entry_alloc_offset(inode));
			iinfo->i_lenAlloc = newsize;
			truncate_setsize(inode, newsize);
			up_write(&iinfo->i_data_sem);
			goto update_time;
		}
		/* Zero the partial tail page before dropping the extents */
		err = block_truncate_page(inode->i_mapping, newsize,
					  udf_get_block);
		if (err)
			return err;
		truncate_setsize(inode, newsize);
		down_write(&iinfo->i_data_sem);
		udf_clear_extent_cache(inode);
		err = udf_truncate_extents(inode);
		up_write(&iinfo->i_data_sem);
		if (err)
			return err;
	}
update_time:
	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return 0;
}
1298
1299/*
1300 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
1301 * arbitrary - just that we hopefully don't limit any real use of rewritten
1302 * inode on write-once media but avoid looping for too long on corrupted media.
1303 */
1304#define UDF_MAX_ICB_NESTING 1024
1305
1306static int udf_read_inode(struct inode *inode, bool hidden_inode)
1307{
1308 struct buffer_head *bh = NULL;
1309 struct fileEntry *fe;
1310 struct extendedFileEntry *efe;
1311 uint16_t ident;
1312 struct udf_inode_info *iinfo = UDF_I(inode);
1313 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1314 struct kernel_lb_addr *iloc = &iinfo->i_location;
1315 unsigned int link_count;
1316 unsigned int indirections = 0;
1317 int bs = inode->i_sb->s_blocksize;
1318 int ret = -EIO;
1319 uint32_t uid, gid;
1320
1321reread:
1322 if (iloc->partitionReferenceNum >= sbi->s_partitions) {
1323 udf_debug("partition reference: %u > logical volume partitions: %u\n",
1324 iloc->partitionReferenceNum, sbi->s_partitions);
1325 return -EIO;
1326 }
1327
1328 if (iloc->logicalBlockNum >=
1329 sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
1330 udf_debug("block=%u, partition=%u out of range\n",
1331 iloc->logicalBlockNum, iloc->partitionReferenceNum);
1332 return -EIO;
1333 }
1334
1335 /*
1336 * Set defaults, but the inode is still incomplete!
1337 * Note: get_new_inode() sets the following on a new inode:
1338 * i_sb = sb
1339 * i_no = ino
1340 * i_flags = sb->s_flags
1341 * i_state = 0
1342 * clean_inode(): zero fills and sets
1343 * i_count = 1
1344 * i_nlink = 1
1345 * i_op = NULL;
1346 */
1347 bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
1348 if (!bh) {
1349 udf_err(inode->i_sb, "(ino %lu) failed !bh\n", inode->i_ino);
1350 return -EIO;
1351 }
1352
1353 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1354 ident != TAG_IDENT_USE) {
1355 udf_err(inode->i_sb, "(ino %lu) failed ident=%u\n",
1356 inode->i_ino, ident);
1357 goto out;
1358 }
1359
1360 fe = (struct fileEntry *)bh->b_data;
1361 efe = (struct extendedFileEntry *)bh->b_data;
1362
1363 if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1364 struct buffer_head *ibh;
1365
1366 ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
1367 if (ident == TAG_IDENT_IE && ibh) {
1368 struct kernel_lb_addr loc;
1369 struct indirectEntry *ie;
1370
1371 ie = (struct indirectEntry *)ibh->b_data;
1372 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1373
1374 if (ie->indirectICB.extLength) {
1375 brelse(ibh);
1376 memcpy(&iinfo->i_location, &loc,
1377 sizeof(struct kernel_lb_addr));
1378 if (++indirections > UDF_MAX_ICB_NESTING) {
1379 udf_err(inode->i_sb,
1380 "too many ICBs in ICB hierarchy"
1381 " (max %d supported)\n",
1382 UDF_MAX_ICB_NESTING);
1383 goto out;
1384 }
1385 brelse(bh);
1386 goto reread;
1387 }
1388 }
1389 brelse(ibh);
1390 } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1391 udf_err(inode->i_sb, "unsupported strategy type: %u\n",
1392 le16_to_cpu(fe->icbTag.strategyType));
1393 goto out;
1394 }
1395 if (fe->icbTag.strategyType == cpu_to_le16(4))
1396 iinfo->i_strat4096 = 0;
1397 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1398 iinfo->i_strat4096 = 1;
1399
1400 iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
1401 ICBTAG_FLAG_AD_MASK;
David Brazdil0f672f62019-12-10 10:32:29 +00001402 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
1403 iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
1404 iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1405 ret = -EIO;
1406 goto out;
1407 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001408 iinfo->i_unique = 0;
1409 iinfo->i_lenEAttr = 0;
1410 iinfo->i_lenExtents = 0;
1411 iinfo->i_lenAlloc = 0;
1412 iinfo->i_next_alloc_block = 0;
1413 iinfo->i_next_alloc_goal = 0;
1414 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1415 iinfo->i_efe = 1;
1416 iinfo->i_use = 0;
1417 ret = udf_alloc_i_data(inode, bs -
1418 sizeof(struct extendedFileEntry));
1419 if (ret)
1420 goto out;
1421 memcpy(iinfo->i_ext.i_data,
1422 bh->b_data + sizeof(struct extendedFileEntry),
1423 bs - sizeof(struct extendedFileEntry));
1424 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1425 iinfo->i_efe = 0;
1426 iinfo->i_use = 0;
1427 ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
1428 if (ret)
1429 goto out;
1430 memcpy(iinfo->i_ext.i_data,
1431 bh->b_data + sizeof(struct fileEntry),
1432 bs - sizeof(struct fileEntry));
1433 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1434 iinfo->i_efe = 0;
1435 iinfo->i_use = 1;
1436 iinfo->i_lenAlloc = le32_to_cpu(
1437 ((struct unallocSpaceEntry *)bh->b_data)->
1438 lengthAllocDescs);
1439 ret = udf_alloc_i_data(inode, bs -
1440 sizeof(struct unallocSpaceEntry));
1441 if (ret)
1442 goto out;
1443 memcpy(iinfo->i_ext.i_data,
1444 bh->b_data + sizeof(struct unallocSpaceEntry),
1445 bs - sizeof(struct unallocSpaceEntry));
1446 return 0;
1447 }
1448
1449 ret = -EIO;
1450 read_lock(&sbi->s_cred_lock);
1451 uid = le32_to_cpu(fe->uid);
1452 if (uid == UDF_INVALID_ID ||
1453 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
1454 inode->i_uid = sbi->s_uid;
1455 else
1456 i_uid_write(inode, uid);
1457
1458 gid = le32_to_cpu(fe->gid);
1459 if (gid == UDF_INVALID_ID ||
1460 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
1461 inode->i_gid = sbi->s_gid;
1462 else
1463 i_gid_write(inode, gid);
1464
1465 if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
1466 sbi->s_fmode != UDF_INVALID_MODE)
1467 inode->i_mode = sbi->s_fmode;
1468 else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
1469 sbi->s_dmode != UDF_INVALID_MODE)
1470 inode->i_mode = sbi->s_dmode;
1471 else
1472 inode->i_mode = udf_convert_permissions(fe);
1473 inode->i_mode &= ~sbi->s_umask;
David Brazdil0f672f62019-12-10 10:32:29 +00001474 iinfo->i_extraPerms = le32_to_cpu(fe->permissions) & ~FE_MAPPED_PERMS;
1475
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001476 read_unlock(&sbi->s_cred_lock);
1477
1478 link_count = le16_to_cpu(fe->fileLinkCount);
1479 if (!link_count) {
1480 if (!hidden_inode) {
1481 ret = -ESTALE;
1482 goto out;
1483 }
1484 link_count = 1;
1485 }
1486 set_nlink(inode, link_count);
1487
1488 inode->i_size = le64_to_cpu(fe->informationLength);
1489 iinfo->i_lenExtents = inode->i_size;
1490
1491 if (iinfo->i_efe == 0) {
1492 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1493 (inode->i_sb->s_blocksize_bits - 9);
1494
1495 udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
1496 udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
1497 udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime);
1498
1499 iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1500 iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
1501 iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
1502 iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
David Brazdil0f672f62019-12-10 10:32:29 +00001503 iinfo->i_streamdir = 0;
1504 iinfo->i_lenStreams = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001505 } else {
1506 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1507 (inode->i_sb->s_blocksize_bits - 9);
1508
1509 udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
1510 udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
1511 udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
1512 udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime);
1513
1514 iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1515 iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
1516 iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
1517 iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
David Brazdil0f672f62019-12-10 10:32:29 +00001518
1519 /* Named streams */
1520 iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0);
1521 iinfo->i_locStreamdir =
1522 lelb_to_cpu(efe->streamDirectoryICB.extLocation);
1523 iinfo->i_lenStreams = le64_to_cpu(efe->objectSize);
1524 if (iinfo->i_lenStreams >= inode->i_size)
1525 iinfo->i_lenStreams -= inode->i_size;
1526 else
1527 iinfo->i_lenStreams = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001528 }
1529 inode->i_generation = iinfo->i_unique;
1530
1531 /*
1532 * Sanity check length of allocation descriptors and extended attrs to
1533 * avoid integer overflows
1534 */
1535 if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
1536 goto out;
1537 /* Now do exact checks */
1538 if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
1539 goto out;
1540 /* Sanity checks for files in ICB so that we don't get confused later */
1541 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1542 /*
1543 * For file in ICB data is stored in allocation descriptor
1544 * so sizes should match
1545 */
1546 if (iinfo->i_lenAlloc != inode->i_size)
1547 goto out;
1548 /* File in ICB has to fit in there... */
1549 if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
1550 goto out;
1551 }
1552
1553 switch (fe->icbTag.fileType) {
1554 case ICBTAG_FILE_TYPE_DIRECTORY:
1555 inode->i_op = &udf_dir_inode_operations;
1556 inode->i_fop = &udf_dir_operations;
1557 inode->i_mode |= S_IFDIR;
1558 inc_nlink(inode);
1559 break;
1560 case ICBTAG_FILE_TYPE_REALTIME:
1561 case ICBTAG_FILE_TYPE_REGULAR:
1562 case ICBTAG_FILE_TYPE_UNDEF:
1563 case ICBTAG_FILE_TYPE_VAT20:
1564 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1565 inode->i_data.a_ops = &udf_adinicb_aops;
1566 else
1567 inode->i_data.a_ops = &udf_aops;
1568 inode->i_op = &udf_file_inode_operations;
1569 inode->i_fop = &udf_file_operations;
1570 inode->i_mode |= S_IFREG;
1571 break;
1572 case ICBTAG_FILE_TYPE_BLOCK:
1573 inode->i_mode |= S_IFBLK;
1574 break;
1575 case ICBTAG_FILE_TYPE_CHAR:
1576 inode->i_mode |= S_IFCHR;
1577 break;
1578 case ICBTAG_FILE_TYPE_FIFO:
1579 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1580 break;
1581 case ICBTAG_FILE_TYPE_SOCKET:
1582 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1583 break;
1584 case ICBTAG_FILE_TYPE_SYMLINK:
1585 inode->i_data.a_ops = &udf_symlink_aops;
1586 inode->i_op = &udf_symlink_inode_operations;
1587 inode_nohighmem(inode);
1588 inode->i_mode = S_IFLNK | 0777;
1589 break;
1590 case ICBTAG_FILE_TYPE_MAIN:
1591 udf_debug("METADATA FILE-----\n");
1592 break;
1593 case ICBTAG_FILE_TYPE_MIRROR:
1594 udf_debug("METADATA MIRROR FILE-----\n");
1595 break;
1596 case ICBTAG_FILE_TYPE_BITMAP:
1597 udf_debug("METADATA BITMAP FILE-----\n");
1598 break;
1599 default:
1600 udf_err(inode->i_sb, "(ino %lu) failed unknown file type=%u\n",
1601 inode->i_ino, fe->icbTag.fileType);
1602 goto out;
1603 }
1604 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1605 struct deviceSpec *dsea =
1606 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1607 if (dsea) {
1608 init_special_inode(inode, inode->i_mode,
1609 MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1610 le32_to_cpu(dsea->minorDeviceIdent)));
1611 /* Developer ID ??? */
1612 } else
1613 goto out;
1614 }
1615 ret = 0;
1616out:
1617 brelse(bh);
1618 return ret;
1619}
1620
1621static int udf_alloc_i_data(struct inode *inode, size_t size)
1622{
1623 struct udf_inode_info *iinfo = UDF_I(inode);
1624 iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
1625 if (!iinfo->i_ext.i_data)
1626 return -ENOMEM;
1627 return 0;
1628}
1629
1630static umode_t udf_convert_permissions(struct fileEntry *fe)
1631{
1632 umode_t mode;
1633 uint32_t permissions;
1634 uint32_t flags;
1635
1636 permissions = le32_to_cpu(fe->permissions);
1637 flags = le16_to_cpu(fe->icbTag.flags);
1638
1639 mode = ((permissions) & 0007) |
1640 ((permissions >> 2) & 0070) |
1641 ((permissions >> 4) & 0700) |
1642 ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1643 ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1644 ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1645
1646 return mode;
1647}
1648
David Brazdil0f672f62019-12-10 10:32:29 +00001649void udf_update_extra_perms(struct inode *inode, umode_t mode)
1650{
1651 struct udf_inode_info *iinfo = UDF_I(inode);
1652
1653 /*
1654 * UDF 2.01 sec. 3.3.3.3 Note 2:
1655 * In Unix, delete permission tracks write
1656 */
1657 iinfo->i_extraPerms &= ~FE_DELETE_PERMS;
1658 if (mode & 0200)
1659 iinfo->i_extraPerms |= FE_PERM_U_DELETE;
1660 if (mode & 0020)
1661 iinfo->i_extraPerms |= FE_PERM_G_DELETE;
1662 if (mode & 0002)
1663 iinfo->i_extraPerms |= FE_PERM_O_DELETE;
1664}
1665
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001666int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
1667{
1668 return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1669}
1670
/* Synchronously write @inode's file entry to disk. */
static int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, /* do_sync */ 1);
}
1675
1676static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec64 time)
1677{
1678 if (iinfo->i_crtime.tv_sec > time.tv_sec ||
1679 (iinfo->i_crtime.tv_sec == time.tv_sec &&
1680 iinfo->i_crtime.tv_nsec > time.tv_nsec))
1681 iinfo->i_crtime = time;
1682}
1683
/*
 * Write the in-core state of @inode back into its on-disk descriptor:
 * a fileEntry, extendedFileEntry or unallocSpaceEntry block is rebuilt
 * from scratch in the buffer, the descriptor tag (CRC, checksum, serial)
 * is recomputed, and the buffer is marked dirty (and synced when @do_sync
 * is set).  Returns 0 or -EIO.
 */
static int udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint64_t lb_recorded;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int err = 0;
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	struct udf_inode_info *iinfo = UDF_I(inode);

	bh = udf_tgetblk(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
	if (!bh) {
		udf_debug("getblk failure\n");
		return -EIO;
	}

	/* The descriptor is rebuilt wholesale; start from a zeroed block */
	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (iinfo->i_use) {
		/* Unallocated space entry: only alloc descs + tag */
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
		       iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
		crclen = sizeof(struct unallocSpaceEntry);

		goto finish;
	}

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->uid = cpu_to_le32(i_uid_read(inode));

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->gid = cpu_to_le32(i_gid_read(inode));

	/* Spread Unix rwx bits into UDF's other/group/owner bit groups */
	udfperms = ((inode->i_mode & 0007)) |
		   ((inode->i_mode & 0070) << 2) |
		   ((inode->i_mode & 0700) << 4);

	udfperms |= iinfo->i_extraPerms;
	fe->permissions = cpu_to_le32(udfperms);

	/* UDF does not count a directory's "." entry in the link count */
	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Device numbers are stored in a "device spec" extended attr */
		struct regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (!dsea) {
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
						     sizeof(struct deviceSpec) +
						     sizeof(struct regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(
						sizeof(struct deviceSpec) +
						sizeof(struct regid));
			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
		}
		eid = (struct regid *)dsea->impUse;
		memset(eid, 0, sizeof(*eid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		lb_recorded = 0; /* No extents => no blocks! */
	else
		lb_recorded =
			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
			(blocksize_bits - 9);

	if (iinfo->i_efe == 0) {
		/* Plain file entry */
		memcpy(bh->b_data + sizeof(struct fileEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
		memset(&(fe->impIdent), 0, sizeof(struct regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	} else {
		/* Extended file entry (UDF >= 2.00): streams, create time */
		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
		efe->objectSize =
			cpu_to_le64(inode->i_size + iinfo->i_lenStreams);
		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		if (iinfo->i_streamdir) {
			struct long_ad *icb_lad = &efe->streamDirectoryICB;

			icb_lad->extLocation =
				cpu_to_lelb(iinfo->i_locStreamdir);
			icb_lad->extLength =
				cpu_to_le32(inode->i_sb->s_blocksize);
		}

		/* Creation time must not be later than any other stamp */
		udf_adjust_time(iinfo, inode->i_atime);
		udf_adjust_time(iinfo, inode->i_mtime);
		udf_adjust_time(iinfo, inode->i_ctime);

		udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
		udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);

		memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}

finish:
	/* Common tail: ICB tag, descriptor tag, CRC and checksum */
	if (iinfo->i_strat4096) {
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	} else {
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	if (iinfo->i_use)
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
	else if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	icbflags =	iinfo->i_alloc_type |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	if (sbi->s_udfrev >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
	fe->descTag.tagLocation = cpu_to_le32(
					iinfo->i_location.logicalBlockNum);
	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
						  crclen));
	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_write_io_error(bh)) {
			udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
				 inode->i_ino);
			err = -EIO;
		}
	}
	brelse(bh);

	return err;
}
1904
1905struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
1906 bool hidden_inode)
1907{
1908 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1909 struct inode *inode = iget_locked(sb, block);
1910 int err;
1911
1912 if (!inode)
1913 return ERR_PTR(-ENOMEM);
1914
1915 if (!(inode->i_state & I_NEW))
1916 return inode;
1917
1918 memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
1919 err = udf_read_inode(inode, hidden_inode);
1920 if (err < 0) {
1921 iget_failed(inode);
1922 return ERR_PTR(err);
1923 }
1924 unlock_new_inode(inode);
1925
1926 return inode;
1927}
1928
/*
 * Start a new allocation extent descriptor (AED) block at @block and chain
 * it after the extent list @epos points into.  If the current list has no
 * room left for the chaining "next extent" pointer, the last real extent is
 * first moved into the new block.  On success *epos is repositioned to the
 * first free slot of the new AED.  Returns 0 or a negative errno.
 */
int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
			    struct extent_position *epos)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	struct allocExtDesc *aed;
	struct extent_position nepos;
	struct kernel_lb_addr neloc;
	int ver, adsize;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	neloc.logicalBlockNum = block;
	neloc.partitionReferenceNum = epos->block.partitionReferenceNum;

	bh = udf_tgetblk(sb, udf_get_lb_pblock(sb, &neloc, 0));
	if (!bh)
		return -EIO;
	lock_buffer(bh);
	memset(bh->b_data, 0x00, sb->s_blocksize);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty_inode(bh, inode);

	/* Initialize the new AED header */
	aed = (struct allocExtDesc *)(bh->b_data);
	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) {
		aed->previousAllocExtLocation =
				cpu_to_le32(epos->block.logicalBlockNum);
	}
	aed->lengthAllocDescs = cpu_to_le32(0);
	if (UDF_SB(sb)->s_udfrev >= 0x0200)
		ver = 3;
	else
		ver = 2;
	udf_new_tag(bh->b_data, TAG_IDENT_AED, ver, 1, block,
		    sizeof(struct tag));

	nepos.block = neloc;
	nepos.offset = sizeof(struct allocExtDesc);
	nepos.bh = bh;

	/*
	 * Do we have to copy current last extent to make space for indirect
	 * one?
	 */
	if (epos->offset + adsize > sb->s_blocksize) {
		struct kernel_lb_addr cp_loc;
		uint32_t cp_len;
		int cp_type;

		/* Re-read the last extent and append it to the new AED */
		epos->offset -= adsize;
		cp_type = udf_current_aext(inode, epos, &cp_loc, &cp_len, 0);
		cp_len |= ((uint32_t)cp_type) << 30;

		__udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
		/* Overwrite its old slot with the pointer to the new AED */
		udf_write_aext(inode, epos, &nepos.block,
			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDECS, 0);
	} else {
		__udf_add_aext(inode, epos, &nepos.block,
			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDECS, 0);
	}

	/* Hand the caller's position over to the new AED block */
	brelse(epos->bh);
	*epos = nepos;

	return 0;
}
2001
2002/*
2003 * Append extent at the given position - should be the first free one in inode
2004 * / indirect extent. This function assumes there is enough space in the inode
2005 * or indirect extent. Use udf_add_aext() if you didn't check for this before.
2006 */
2007int __udf_add_aext(struct inode *inode, struct extent_position *epos,
2008 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2009{
2010 struct udf_inode_info *iinfo = UDF_I(inode);
2011 struct allocExtDesc *aed;
2012 int adsize;
2013
2014 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2015 adsize = sizeof(struct short_ad);
2016 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2017 adsize = sizeof(struct long_ad);
2018 else
2019 return -EIO;
2020
2021 if (!epos->bh) {
2022 WARN_ON(iinfo->i_lenAlloc !=
2023 epos->offset - udf_file_entry_alloc_offset(inode));
2024 } else {
2025 aed = (struct allocExtDesc *)epos->bh->b_data;
2026 WARN_ON(le32_to_cpu(aed->lengthAllocDescs) !=
2027 epos->offset - sizeof(struct allocExtDesc));
2028 WARN_ON(epos->offset + adsize > inode->i_sb->s_blocksize);
2029 }
2030
2031 udf_write_aext(inode, epos, eloc, elen, inc);
2032
2033 if (!epos->bh) {
2034 iinfo->i_lenAlloc += adsize;
2035 mark_inode_dirty(inode);
2036 } else {
2037 aed = (struct allocExtDesc *)epos->bh->b_data;
2038 le32_add_cpu(&aed->lengthAllocDescs, adsize);
2039 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2040 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2041 udf_update_tag(epos->bh->b_data,
2042 epos->offset + (inc ? 0 : adsize));
2043 else
2044 udf_update_tag(epos->bh->b_data,
2045 sizeof(struct allocExtDesc));
2046 mark_buffer_dirty_inode(epos->bh, inode);
2047 }
2048
2049 return 0;
2050}
2051
2052/*
2053 * Append extent at given position - should be the first free one in inode
2054 * / indirect extent. Takes care of allocating and linking indirect blocks.
2055 */
2056int udf_add_aext(struct inode *inode, struct extent_position *epos,
2057 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2058{
2059 int adsize;
2060 struct super_block *sb = inode->i_sb;
2061
2062 if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2063 adsize = sizeof(struct short_ad);
2064 else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2065 adsize = sizeof(struct long_ad);
2066 else
2067 return -EIO;
2068
2069 if (epos->offset + (2 * adsize) > sb->s_blocksize) {
2070 int err;
2071 udf_pblk_t new_block;
2072
2073 new_block = udf_new_block(sb, NULL,
2074 epos->block.partitionReferenceNum,
2075 epos->block.logicalBlockNum, &err);
2076 if (!new_block)
2077 return -ENOSPC;
2078
2079 err = udf_setup_indirect_aext(inode, new_block, epos);
2080 if (err)
2081 return err;
2082 }
2083
2084 return __udf_add_aext(inode, epos, eloc, elen, inc);
2085}
2086
/*
 * Write the extent descriptor (@eloc, @elen) at position @epos - either
 * into the inode's in-ICB allocation descriptor area or into an external
 * allocation extent block (epos->bh). When @inc is set, @epos is advanced
 * past the written descriptor. Unsupported allocation types are silently
 * ignored.
 */
void udf_write_aext(struct inode *inode, struct extent_position *epos,
		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	/* Locate the raw descriptor slot: inside the ICB or in epos->bh */
	if (!epos->bh)
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = (struct short_ad *)ptr;
		sad->extLength = cpu_to_le32(elen);
		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
		adsize = sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = (struct long_ad *)ptr;
		lad->extLength = cpu_to_le32(elen);
		lad->extLocation = cpu_to_lelb(*eloc);
		memset(lad->impUse, 0x00, sizeof(lad->impUse));
		adsize = sizeof(struct long_ad);
		break;
	default:
		return;
	}

	if (epos->bh) {
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
			/* Keep the AED's descriptor tag (CRC) up to date */
			struct allocExtDesc *aed =
				(struct allocExtDesc *)epos->bh->b_data;
			udf_update_tag(epos->bh->b_data,
				       le32_to_cpu(aed->lengthAllocDescs) +
				       sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(epos->bh, inode);
	} else {
		mark_inode_dirty(inode);
	}

	if (inc)
		epos->offset += adsize;
}
2138
2139/*
2140 * Only 1 indirect extent in a row really makes sense but allow upto 16 in case
2141 * someone does some weird stuff.
2142 */
2143#define UDF_MAX_INDIR_EXTS 16
2144
2145int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
2146 struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
2147{
2148 int8_t etype;
2149 unsigned int indirections = 0;
2150
2151 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
2152 (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
2153 udf_pblk_t block;
2154
2155 if (++indirections > UDF_MAX_INDIR_EXTS) {
2156 udf_err(inode->i_sb,
2157 "too many indirect extents in inode %lu\n",
2158 inode->i_ino);
2159 return -1;
2160 }
2161
2162 epos->block = *eloc;
2163 epos->offset = sizeof(struct allocExtDesc);
2164 brelse(epos->bh);
2165 block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
2166 epos->bh = udf_tread(inode->i_sb, block);
2167 if (!epos->bh) {
2168 udf_debug("reading block %u failed!\n", block);
2169 return -1;
2170 }
2171 }
2172
2173 return etype;
2174}
2175
/*
 * Read the allocation descriptor at @epos, returning its extent type
 * (return value) and its location/length through @eloc/@elen. Unlike
 * udf_next_aext(), indirect (continuation) extents are returned as-is,
 * not followed. When @inc is set, @epos->offset is advanced past the
 * descriptor that was read. Returns -1 when the descriptor area is
 * exhausted/malformed or the allocation type is unsupported.
 */
int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
			struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh) {
		/* Descriptors embedded in the inode's ICB */
		if (!epos->offset)
			epos->offset = udf_file_entry_alloc_offset(inode);
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
		alen = udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenAlloc;
	} else {
		/* Descriptors in an external allocation extent block */
		if (!epos->offset)
			epos->offset = sizeof(struct allocExtDesc);
		ptr = epos->bh->b_data + epos->offset;
		alen = sizeof(struct allocExtDesc) +
			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
							lengthAllocDescs);
	}

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
		if (!sad)
			return -1;
		/* Top two bits of extLength carry the extent type */
		etype = le32_to_cpu(sad->extLength) >> 30;
		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
		/* short_ads implicitly refer to the inode's own partition */
		eloc->partitionReferenceNum =
			iinfo->i_location.partitionReferenceNum;
		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
		if (!lad)
			return -1;
		etype = le32_to_cpu(lad->extLength) >> 30;
		*eloc = lelb_to_cpu(lad->extLocation);
		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	default:
		udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
		return -1;
	}

	return etype;
}
2229
2230static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
2231 struct kernel_lb_addr neloc, uint32_t nelen)
2232{
2233 struct kernel_lb_addr oeloc;
2234 uint32_t oelen;
2235 int8_t etype;
2236
2237 if (epos.bh)
2238 get_bh(epos.bh);
2239
2240 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
2241 udf_write_aext(inode, &epos, &neloc, nelen, 1);
2242 neloc = oeloc;
2243 nelen = (etype << 30) | oelen;
2244 }
2245 udf_add_aext(inode, &epos, &neloc, nelen, 1);
2246 brelse(epos.bh);
2247
2248 return (nelen >> 30);
2249}
2250
/*
 * Delete the extent at @epos: every following extent is copied one slot
 * towards the start and the freed slot(s) are zeroed out. If the shift
 * leaves the last allocation extent block empty, that block is freed as
 * well. Returns -1 when there is no extent at @epos.
 */
int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
{
	struct extent_position oepos;
	int adsize;
	int8_t etype;
	struct allocExtDesc *aed;
	struct udf_inode_info *iinfo;
	struct kernel_lb_addr eloc;
	uint32_t elen;

	if (epos.bh) {
		/*
		 * Deliberately take two references: one for 'epos' and one
		 * for the 'oepos' copy made below, balancing the two
		 * brelse() calls at the end.
		 */
		get_bh(epos.bh);
		get_bh(epos.bh);
	}

	iinfo = UDF_I(inode);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		adsize = 0;

	oepos = epos;
	/* Step over the extent being deleted; fail if there is none */
	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
		return -1;

	/* Copy each following extent into the slot before it */
	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
		if (oepos.bh != epos.bh) {
			/* Reader crossed into the next indirect block */
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = epos.offset - adsize;
		}
	}
	memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
	elen = 0;

	if (epos.bh != oepos.bh) {
		/*
		 * The last extent lived alone in an indirect block that is
		 * now unused: free that block and zero both the vacated slot
		 * and the indirect extent pointer before it - hence the two
		 * udf_write_aext() calls and the "* 2" accounting below.
		 */
		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= (adsize * 2);
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       oepos.offset - (2 * adsize));
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	} else {
		/* Just zero the now-duplicate last slot */
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= adsize;
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       epos.offset - adsize);
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

	return (elen >> 30);
}
2334
2335int8_t inode_bmap(struct inode *inode, sector_t block,
2336 struct extent_position *pos, struct kernel_lb_addr *eloc,
2337 uint32_t *elen, sector_t *offset)
2338{
2339 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
2340 loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
2341 int8_t etype;
2342 struct udf_inode_info *iinfo;
2343
2344 iinfo = UDF_I(inode);
2345 if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
2346 pos->offset = 0;
2347 pos->block = iinfo->i_location;
2348 pos->bh = NULL;
2349 }
2350 *elen = 0;
2351 do {
2352 etype = udf_next_aext(inode, pos, eloc, elen, 1);
2353 if (etype == -1) {
2354 *offset = (bcount - lbcount) >> blocksize_bits;
2355 iinfo->i_lenExtents = lbcount;
2356 return -1;
2357 }
2358 lbcount += *elen;
2359 } while (lbcount <= bcount);
2360 /* update extent cache */
2361 udf_update_extent_cache(inode, lbcount - *elen, pos);
2362 *offset = (bcount + *elen - lbcount) >> blocksize_bits;
2363
2364 return etype;
2365}
2366
2367udf_pblk_t udf_block_map(struct inode *inode, sector_t block)
2368{
2369 struct kernel_lb_addr eloc;
2370 uint32_t elen;
2371 sector_t offset;
2372 struct extent_position epos = {};
2373 udf_pblk_t ret;
2374
2375 down_read(&UDF_I(inode)->i_data_sem);
2376
2377 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
2378 (EXT_RECORDED_ALLOCATED >> 30))
2379 ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
2380 else
2381 ret = 0;
2382
2383 up_read(&UDF_I(inode)->i_data_sem);
2384 brelse(epos.bh);
2385
2386 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2387 return udf_fixed_to_variable(ret);
2388 else
2389 return ret;
2390}