// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
 *  and Andreas Gruenbacher <agruen@suse.de>.
 */

/*
 * Extended attributes are stored directly in inodes (on file systems with
 * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
 * field contains the block number if an inode uses an additional block. All
 * attributes must fit in the inode and one additional block. Blocks that
 * contain the identical set of attributes may be shared among several inodes.
 * Identical blocks are detected by keeping a cache of blocks that have
 * recently been accessed.
 *
 * The attributes in inodes and on blocks have a different header; the entries
 * are stored in the same format:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The header is followed by multiple entry descriptors. In disk blocks, the
 * entry descriptors are kept sorted. In inodes, they are unsorted. The
 * attribute values are aligned to the end of the block in no specific order.
 *
 * Locking strategy
 * ----------------
 * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count can change. Multiple writers to the same block are synchronized
 * by the buffer lock.
 */
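
/*
 * Illustrative sketch (editor's note, not part of the original file): the
 * read-side pattern implied by the locking rules above. Any reader of
 * i_file_acl or of the xattr entries takes xattr_sem shared, as
 * ext4_xattr_get() does further down:
 *
 *	down_read(&EXT4_I(inode)->xattr_sem);
 *	... look up i_file_acl and/or the in-inode entries ...
 *	up_read(&EXT4_I(inode)->xattr_sem);
 *
 * Writers take the semaphore exclusively with down_write()/up_write().
 */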

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "acl.h"

#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, fmt, ...)					\
	printk(KERN_DEBUG "inode %s:%lu: " fmt "\n",			\
	       inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)					\
	printk(KERN_DEBUG "block %pg:%lu: " fmt "\n",			\
	       bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
#else
# define ea_idebug(inode, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif

static void ext4_xattr_block_cache_insert(struct mb_cache *,
					  struct buffer_head *);
static struct buffer_head *
ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
			    struct mb_cache_entry **);
static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
				    size_t value_count);
static void ext4_xattr_rehash(struct ext4_xattr_header *);

static const struct xattr_handler * const ext4_xattr_handler_map[] = {
	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT4_XATTR_INDEX_TRUSTED]	     = &ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_SECURITY
	[EXT4_XATTR_INDEX_SECURITY]	     = &ext4_xattr_security_handler,
#endif
	[EXT4_XATTR_INDEX_HURD]		     = &ext4_xattr_hurd_handler,
};

const struct xattr_handler *ext4_xattr_handlers[] = {
	&ext4_xattr_user_handler,
	&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT4_FS_SECURITY
	&ext4_xattr_security_handler,
#endif
	&ext4_xattr_hurd_handler,
	NULL
};

#define EA_BLOCK_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_ea_block_cache)

#define EA_INODE_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_ea_inode_cache)

static int
ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
			struct inode *inode);

#ifdef CONFIG_LOCKDEP
void ext4_xattr_inode_set_class(struct inode *ea_inode)
{
	lockdep_set_subclass(&ea_inode->i_rwsem, 1);
}
#endif

static __le32 ext4_xattr_block_csum(struct inode *inode,
				    sector_t block_nr,
				    struct ext4_xattr_header *hdr)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__le64 dsk_block_nr = cpu_to_le64(block_nr);
	__u32 dummy_csum = 0;
	int offset = offsetof(struct ext4_xattr_header, h_checksum);

	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
			   sizeof(dsk_block_nr));
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
	offset += sizeof(dummy_csum);
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
			   EXT4_BLOCK_SIZE(inode->i_sb) - offset);

	return cpu_to_le32(csum);
}

static int ext4_xattr_block_csum_verify(struct inode *inode,
					struct buffer_head *bh)
{
	struct ext4_xattr_header *hdr = BHDR(bh);
	int ret = 1;

	if (ext4_has_metadata_csum(inode->i_sb)) {
		lock_buffer(bh);
		ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
							bh->b_blocknr, hdr));
		unlock_buffer(bh);
	}
	return ret;
}

static void ext4_xattr_block_csum_set(struct inode *inode,
				      struct buffer_head *bh)
{
	if (ext4_has_metadata_csum(inode->i_sb))
		BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
						bh->b_blocknr, BHDR(bh));
}

static inline const struct xattr_handler *
ext4_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
		handler = ext4_xattr_handler_map[name_index];
	return handler;
}

static int
ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
			 void *value_start)
{
	struct ext4_xattr_entry *e = entry;

	/* Find the end of the names list */
	while (!IS_LAST_ENTRY(e)) {
		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
		if ((void *)next >= end)
			return -EFSCORRUPTED;
		if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
			return -EFSCORRUPTED;
		e = next;
	}

	/* Check the values */
	while (!IS_LAST_ENTRY(entry)) {
		u32 size = le32_to_cpu(entry->e_value_size);

		if (size > EXT4_XATTR_SIZE_MAX)
			return -EFSCORRUPTED;

		if (size != 0 && entry->e_value_inum == 0) {
			u16 offs = le16_to_cpu(entry->e_value_offs);
			void *value;

			/*
			 * The value cannot overlap the names, and the value
			 * with padding cannot extend beyond 'end'. Check both
			 * the padded and unpadded sizes, since the size may
			 * overflow to 0 when adding padding.
			 */
			if (offs > end - value_start)
				return -EFSCORRUPTED;
			value = value_start + offs;
			if (value < (void *)e + sizeof(u32) ||
			    size > end - value ||
			    EXT4_XATTR_SIZE(size) > end - value)
				return -EFSCORRUPTED;
		}
		entry = EXT4_XATTR_NEXT(entry);
	}

	return 0;
}
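
/*
 * Worked example (editor's note, not part of the original file) for the
 * overflow comment above: EXT4_XATTR_SIZE() rounds a size up to the next
 * 4-byte multiple, so a corrupted u32 size of 0xfffffffe wraps around to a
 * padded size of 0, which would trivially pass a "padded size fits"
 * comparison; checking the unpadded size as well closes that hole.
 */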

static inline int
__ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
			 const char *function, unsigned int line)
{
	int error = -EFSCORRUPTED;

	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1))
		goto errout;
	if (buffer_verified(bh))
		return 0;

	error = -EFSBADCRC;
	if (!ext4_xattr_block_csum_verify(inode, bh))
		goto errout;
	error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
					 bh->b_data);
errout:
	if (error)
		__ext4_error_inode(inode, function, line, 0, -error,
				   "corrupted xattr block %llu",
				   (unsigned long long) bh->b_blocknr);
	else
		set_buffer_verified(bh);
	return error;
}

#define ext4_xattr_check_block(inode, bh) \
	__ext4_xattr_check_block((inode), (bh), __func__, __LINE__)


static int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
		    void *end, const char *function, unsigned int line)
{
	int error = -EFSCORRUPTED;

	if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
	    (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
		goto errout;
	error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
errout:
	if (error)
		__ext4_error_inode(inode, function, line, 0, -error,
				   "corrupted in-inode xattr");
	return error;
}

#define xattr_check_inode(inode, header, end) \
	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)

static int
xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
		 void *end, int name_index, const char *name, int sorted)
{
	struct ext4_xattr_entry *entry, *next;
	size_t name_len;
	int cmp = 1;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	for (entry = *pentry; !IS_LAST_ENTRY(entry); entry = next) {
		next = EXT4_XATTR_NEXT(entry);
		if ((void *) next >= end) {
			EXT4_ERROR_INODE(inode, "corrupted xattr entries");
			return -EFSCORRUPTED;
		}
		cmp = name_index - entry->e_name_index;
		if (!cmp)
			cmp = name_len - entry->e_name_len;
		if (!cmp)
			cmp = memcmp(name, entry->e_name, name_len);
		if (cmp <= 0 && (sorted || cmp == 0))
			break;
	}
	*pentry = entry;
	return cmp ? -ENODATA : 0;
}

static u32
ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
{
	return ext4_chksum(sbi, sbi->s_csum_seed, buffer, size);
}

static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
	return ((u64)ea_inode->i_ctime.tv_sec << 32) |
		(u32) inode_peek_iversion_raw(ea_inode);
}

static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
{
	ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
	inode_set_iversion_raw(ea_inode, ref_count & 0xffffffff);
}

static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
{
	return (u32)ea_inode->i_atime.tv_sec;
}

static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
{
	ea_inode->i_atime.tv_sec = hash;
}
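
/*
 * Illustrative note (editor's note, not part of the original file): EA
 * inodes carry no meaningful timestamps, so the helpers above reuse those
 * fields as storage. The 64-bit reference count is split across
 * i_ctime.tv_sec (high 32 bits) and the raw i_version (low 32 bits), and
 * the value hash lives in i_atime.tv_sec. For example, a ref_count of
 * 0x0000000200000005 is stored as i_ctime.tv_sec == 2 and i_version == 5,
 * and read back as ((u64)2 << 32) | 5.
 */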

/*
 * Read the EA value from an inode.
 */
static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size)
{
	int blocksize = 1 << ea_inode->i_blkbits;
	int bh_count = (size + blocksize - 1) >> ea_inode->i_blkbits;
	int tail_size = (size % blocksize) ?: blocksize;
	struct buffer_head *bhs_inline[8];
	struct buffer_head **bhs = bhs_inline;
	int i, ret;

	if (bh_count > ARRAY_SIZE(bhs_inline)) {
		bhs = kmalloc_array(bh_count, sizeof(*bhs), GFP_NOFS);
		if (!bhs)
			return -ENOMEM;
	}

	ret = ext4_bread_batch(ea_inode, 0 /* block */, bh_count,
			       true /* wait */, bhs);
	if (ret)
		goto free_bhs;

	for (i = 0; i < bh_count; i++) {
		/* There shouldn't be any holes in ea_inode. */
		if (!bhs[i]) {
			ret = -EFSCORRUPTED;
			goto put_bhs;
		}
		memcpy((char *)buf + blocksize * i, bhs[i]->b_data,
		       i < bh_count - 1 ? blocksize : tail_size);
	}
	ret = 0;
put_bhs:
	for (i = 0; i < bh_count; i++)
		brelse(bhs[i]);
free_bhs:
	if (bhs != bhs_inline)
		kfree(bhs);
	return ret;
}

#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)

static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
				 u32 ea_inode_hash, struct inode **ea_inode)
{
	struct inode *inode;
	int err;

	inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ext4_error(parent->i_sb,
			   "error while reading EA inode %lu err=%d", ea_ino,
			   err);
		return err;
	}

	if (is_bad_inode(inode)) {
		ext4_error(parent->i_sb,
			   "error while reading EA inode %lu is_bad_inode",
			   ea_ino);
		err = -EIO;
		goto error;
	}

	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		ext4_error(parent->i_sb,
			   "EA inode %lu does not have EXT4_EA_INODE_FL flag",
			   ea_ino);
		err = -EINVAL;
		goto error;
	}

	ext4_xattr_inode_set_class(inode);

	/*
	 * Check whether this is an old Lustre-style xattr inode. The Lustre
	 * implementation does not do hash validation; instead, it keeps a
	 * backpointer from ea_inode to the parent inode.
	 */
	if (ea_inode_hash != ext4_xattr_inode_get_hash(inode) &&
	    EXT4_XATTR_INODE_GET_PARENT(inode) == parent->i_ino &&
	    inode->i_generation == parent->i_generation) {
		ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
		ext4_xattr_inode_set_ref(inode, 1);
	} else {
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
	}

	*ea_inode = inode;
	return 0;
error:
	iput(inode);
	return err;
}

static int
ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
			       struct ext4_xattr_entry *entry, void *buffer,
			       size_t size)
{
	u32 hash;

	/* Verify stored hash matches calculated hash. */
	hash = ext4_xattr_inode_hash(EXT4_SB(ea_inode->i_sb), buffer, size);
	if (hash != ext4_xattr_inode_get_hash(ea_inode))
		return -EFSCORRUPTED;

	if (entry) {
		__le32 e_hash, tmp_data;

		/* Verify entry hash. */
		tmp_data = cpu_to_le32(hash);
		e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
					       &tmp_data, 1);
		if (e_hash != entry->e_hash)
			return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * Read xattr value from the EA inode.
 */
static int
ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry,
		     void *buffer, size_t size)
{
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
	struct inode *ea_inode;
	int err;

	err = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum),
				    le32_to_cpu(entry->e_hash), &ea_inode);
	if (err) {
		ea_inode = NULL;
		goto out;
	}

	if (i_size_read(ea_inode) != size) {
		ext4_warning_inode(ea_inode,
				   "ea_inode file size=%llu entry size=%zu",
				   i_size_read(ea_inode), size);
		err = -EFSCORRUPTED;
		goto out;
	}

	err = ext4_xattr_inode_read(ea_inode, buffer, size);
	if (err)
		goto out;

	if (!ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE)) {
		err = ext4_xattr_inode_verify_hashes(ea_inode, entry, buffer,
						     size);
		if (err) {
			ext4_warning_inode(ea_inode,
					   "EA inode hash validation failed");
			goto out;
		}

		if (ea_inode_cache)
			mb_cache_entry_create(ea_inode_cache, GFP_NOFS,
					ext4_xattr_inode_get_hash(ea_inode),
					ea_inode->i_ino, true /* reusable */);
	}
out:
	iput(ea_inode);
	return err;
}

static int
ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_entry *entry;
	size_t size;
	void *end;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (!EXT4_I(inode)->i_file_acl)
		return -ENODATA;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		  atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	error = ext4_xattr_check_block(inode, bh);
	if (error)
		goto cleanup;
	ext4_xattr_block_cache_insert(ea_block_cache, bh);
	entry = BFIRST(bh);
	end = bh->b_data + bh->b_size;
	error = xattr_find_entry(inode, &entry, end, name_index, name, 1);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	error = -ERANGE;
	if (unlikely(size > EXT4_XATTR_SIZE_MAX))
		goto cleanup;
	if (buffer) {
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
			error = ext4_xattr_inode_get(inode, entry, buffer,
						     size);
			if (error)
				goto cleanup;
		} else {
			u16 offset = le16_to_cpu(entry->e_value_offs);
			void *p = bh->b_data + offset;

			if (unlikely(p + size > end))
				goto cleanup;
			memcpy(buffer, p, size);
		}
	}
	error = size;

cleanup:
	brelse(bh);
	return error;
}

int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	size_t size;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return -ENODATA;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;
	entry = IFIRST(header);
	error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	error = -ERANGE;
	if (unlikely(size > EXT4_XATTR_SIZE_MAX))
		goto cleanup;
	if (buffer) {
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
			error = ext4_xattr_inode_get(inode, entry, buffer,
						     size);
			if (error)
				goto cleanup;
		} else {
			u16 offset = le16_to_cpu(entry->e_value_offs);
			void *p = (void *)IFIRST(header) + offset;

			if (unlikely(p + size > end))
				goto cleanup;
			memcpy(buffer, p, size);
		}
	}
	error = size;

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_get()
 *
 * Copy an extended attribute into the buffer provided, or compute the
 * buffer size required. If the buffer is NULL, only the required buffer
 * size is computed.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	int error;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (strlen(name) > 255)
		return -ERANGE;

	down_read(&EXT4_I(inode)->xattr_sem);
	error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
				     buffer_size);
	if (error == -ENODATA)
		error = ext4_xattr_block_get(inode, name_index, name, buffer,
					     buffer_size);
	up_read(&EXT4_I(inode)->xattr_sem);
	return error;
}
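
/*
 * Usage sketch (editor's note, illustrative only, not part of the original
 * file): callers typically use the two-call pattern described above -- pass
 * a NULL buffer to learn the required size, then retry with an allocation
 * of that size. The helper name and the xattr name "example" are
 * hypothetical, and a real caller must be prepared for -ERANGE if the value
 * grows between the two calls.
 */
static int __maybe_unused ext4_xattr_get_example(struct inode *inode)
{
	int size, err;
	char *buf;

	/* First call: NULL buffer, returns the number of bytes required. */
	size = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, "example",
			      NULL, 0);
	if (size < 0)
		return size;	/* e.g. -ENODATA if the attribute is absent */
	buf = kmalloc(size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	/* Second call: copy the value into the buffer just allocated. */
	err = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, "example",
			     buf, size);
	kfree(buf);
	return err < 0 ? err : 0;
}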

static int
ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
			char *buffer, size_t buffer_size)
{
	size_t rest = buffer_size;

	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext4_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest)
					return -ERANGE;
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	return buffer_size - rest; /* total size */
}

static int
ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	if (!EXT4_I(inode)->i_file_acl)
		return 0;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		  atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	error = ext4_xattr_check_block(inode, bh);
	if (error)
		goto cleanup;
	ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
					buffer_size);
cleanup:
	brelse(bh);
	return error;
}

static int
ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return 0;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;
	error = ext4_xattr_list_entries(dentry, IFIRST(header),
					buffer, buffer_size);

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_rwsem: don't care
 *
 * Copy a list of attribute names into the buffer provided, or compute the
 * buffer size required. If the buffer is NULL, only the required buffer
 * size is computed.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	int ret, ret2;

	down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
	ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	if (buffer) {
		buffer += ret;
		buffer_size -= ret;
	}
	ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	ret += ret2;
errout:
	up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
	return ret;
}

/*
 * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext4_xattr_update_super_block(handle_t *handle,
					  struct super_block *sb)
{
	if (ext4_has_feature_xattr(sb))
		return;

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
		ext4_set_feature_xattr(sb);
		ext4_handle_dirty_super(handle, sb);
	}
}

int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
{
	struct ext4_iloc iloc = { .bh = NULL };
	struct buffer_head *bh = NULL;
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	qsize_t ea_inode_refs = 0;
	void *end;
	int ret;

	lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);

	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		ret = ext4_get_inode_loc(inode, &iloc);
		if (ret)
			goto out;
		raw_inode = ext4_raw_inode(&iloc);
		header = IHDR(inode, raw_inode);
		end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
		ret = xattr_check_inode(inode, header, end);
		if (ret)
			goto out;

		for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
		     entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				ea_inode_refs++;
	}

	if (EXT4_I(inode)->i_file_acl) {
		bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			bh = NULL;
			goto out;
		}

		ret = ext4_xattr_check_block(inode, bh);
		if (ret)
			goto out;

		for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
		     entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				ea_inode_refs++;
	}
	*usage = ea_inode_refs + 1;
	ret = 0;
out:
	brelse(iloc.bh);
	brelse(bh);
	return ret;
}

static inline size_t round_up_cluster(struct inode *inode, size_t length)
{
	struct super_block *sb = inode->i_sb;
	size_t cluster_size = 1 << (EXT4_SB(sb)->s_cluster_bits +
				    inode->i_blkbits);
	size_t mask = ~(cluster_size - 1);

	return (length + cluster_size - 1) & mask;
}
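
/*
 * Worked example (editor's note, not part of the original file): with 4KiB
 * blocks (i_blkbits == 12) on a bigalloc filesystem with s_cluster_bits == 4,
 * the cluster size is 1 << 16 == 64KiB, so round_up_cluster(inode, 5000)
 * returns 65536 and round_up_cluster(inode, 65536) also returns 65536.
 */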

static int ext4_xattr_inode_alloc_quota(struct inode *inode, size_t len)
{
	int err;

	err = dquot_alloc_inode(inode);
	if (err)
		return err;
	err = dquot_alloc_space_nodirty(inode, round_up_cluster(inode, len));
	if (err)
		dquot_free_inode(inode);
	return err;
}

static void ext4_xattr_inode_free_quota(struct inode *parent,
					struct inode *ea_inode,
					size_t len)
{
	if (ea_inode &&
	    ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE))
		return;
	dquot_free_space_nodirty(parent, round_up_cluster(parent, len));
	dquot_free_inode(parent);
}

int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
			     struct buffer_head *block_bh, size_t value_len,
			     bool is_create)
{
	int credits;
	int blocks;

	/*
	 * 1) Owner inode update
	 * 2) Ref count update on old xattr block
	 * 3) new xattr block
	 * 4) block bitmap update for new xattr block
	 * 5) group descriptor for new xattr block
	 * 6) block bitmap update for old xattr block
	 * 7) group descriptor for old block
	 *
	 * 6 & 7 can happen if we have two racing threads T_a and T_b
	 * which are each trying to set an xattr on inodes I_a and I_b
	 * which were both initially sharing an xattr block.
	 */
	credits = 7;

	/* Quota updates. */
	credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(sb);

	/*
	 * In case of inline data, we may push out the data to a block,
	 * so we need to reserve credits for this eventuality
	 */
	if (inode && ext4_has_inline_data(inode))
		credits += ext4_writepage_trans_blocks(inode) + 1;

	/* We are done if ea_inode feature is not enabled. */
	if (!ext4_has_feature_ea_inode(sb))
		return credits;

	/* New ea_inode, inode map, block bitmap, group descriptor. */
	credits += 4;

	/* Data blocks. */
	blocks = (value_len + sb->s_blocksize - 1) >> sb->s_blocksize_bits;

	/* Indirection block or one level of extent tree. */
	blocks += 1;

	/* Block bitmap and group descriptor updates for each block. */
	credits += blocks * 2;

	/* Blocks themselves. */
	credits += blocks;

	if (!is_create) {
		/* Dereference ea_inode holding old xattr value.
		 * Old ea_inode, inode map, block bitmap, group descriptor.
		 */
		credits += 4;

		/* Data blocks for old ea_inode. */
		blocks = XATTR_SIZE_MAX >> sb->s_blocksize_bits;

		/* Indirection block or one level of extent tree for old
		 * ea_inode.
		 */
		blocks += 1;

		/* Block bitmap and group descriptor updates for each block. */
		credits += blocks * 2;
	}

	/* We may need to clone the existing xattr block in which case we need
	 * to increment ref counts for existing ea_inodes referenced by it.
	 */
	if (block_bh) {
		struct ext4_xattr_entry *entry = BFIRST(block_bh);

		for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				/* Ref count update on ea_inode. */
				credits += 1;
	}
	return credits;
}
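
/*
 * Worked example (editor's note, not part of the original file): on a
 * 4KiB-block filesystem with the ea_inode feature, creating a new 16KiB
 * value with no inline data and no existing block_bh needs
 * blocks = ceil(16384 / 4096) + 1 = 5, so the ea_inode part contributes
 * 4 + 5 * 2 + 5 = 19 credits on top of the base
 * 7 + EXT4_MAXQUOTAS_TRANS_BLOCKS(sb).
 */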

static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
				       int ref_change)
{
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
	struct ext4_iloc iloc;
	s64 ref_count;
	u32 hash;
	int ret;

	inode_lock(ea_inode);

	ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
	if (ret)
		goto out;

	ref_count = ext4_xattr_inode_get_ref(ea_inode);
	ref_count += ref_change;
	ext4_xattr_inode_set_ref(ea_inode, ref_count);

	if (ref_change > 0) {
		WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
			  ea_inode->i_ino, ref_count);

		if (ref_count == 1) {
			WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
				  ea_inode->i_ino, ea_inode->i_nlink);

			set_nlink(ea_inode, 1);
			ext4_orphan_del(handle, ea_inode);

			if (ea_inode_cache) {
				hash = ext4_xattr_inode_get_hash(ea_inode);
				mb_cache_entry_create(ea_inode_cache,
						      GFP_NOFS, hash,
						      ea_inode->i_ino,
						      true /* reusable */);
			}
		}
	} else {
		WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
			  ea_inode->i_ino, ref_count);

		if (ref_count == 0) {
			WARN_ONCE(ea_inode->i_nlink != 1,
				  "EA inode %lu i_nlink=%u",
				  ea_inode->i_ino, ea_inode->i_nlink);

			clear_nlink(ea_inode);
			ext4_orphan_add(handle, ea_inode);

			if (ea_inode_cache) {
				hash = ext4_xattr_inode_get_hash(ea_inode);
				mb_cache_entry_delete(ea_inode_cache, hash,
						      ea_inode->i_ino);
			}
		}
	}

	ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
	if (ret)
		ext4_warning_inode(ea_inode,
				   "ext4_mark_iloc_dirty() failed ret=%d", ret);
out:
	inode_unlock(ea_inode);
	return ret;
}

static int ext4_xattr_inode_inc_ref(handle_t *handle, struct inode *ea_inode)
{
	return ext4_xattr_inode_update_ref(handle, ea_inode, 1);
}

static int ext4_xattr_inode_dec_ref(handle_t *handle, struct inode *ea_inode)
{
	return ext4_xattr_inode_update_ref(handle, ea_inode, -1);
}

static int ext4_xattr_inode_inc_ref_all(handle_t *handle, struct inode *parent,
					struct ext4_xattr_entry *first)
{
	struct inode *ea_inode;
	struct ext4_xattr_entry *entry;
	struct ext4_xattr_entry *failed_entry;
	unsigned int ea_ino;
	int err, saved_err;

	for (entry = first; !IS_LAST_ENTRY(entry);
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err)
			goto cleanup;
		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode, "inc ref error %d", err);
			iput(ea_inode);
			goto cleanup;
		}
		iput(ea_inode);
	}
	return 0;

cleanup:
	saved_err = err;
	failed_entry = entry;

	for (entry = first; entry != failed_entry;
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err) {
			ext4_warning(parent->i_sb,
				     "cleanup ea_ino %u iget error %d", ea_ino,
				     err);
			continue;
		}
		err = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (err)
			ext4_warning_inode(ea_inode, "cleanup dec ref error %d",
					   err);
		iput(ea_inode);
	}
	return saved_err;
}

static int ext4_xattr_restart_fn(handle_t *handle, struct inode *inode,
				 struct buffer_head *bh, bool block_csum,
				 bool dirty)
{
	int error;

	if (bh && dirty) {
		if (block_csum)
			ext4_xattr_block_csum_set(inode, bh);
		error = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (error) {
			ext4_warning(inode->i_sb, "Handle metadata (error %d)",
				     error);
			return error;
		}
	}
	return 0;
}

static void
ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
			     struct buffer_head *bh,
			     struct ext4_xattr_entry *first, bool block_csum,
			     struct ext4_xattr_inode_array **ea_inode_array,
			     int extra_credits, bool skip_quota)
{
	struct inode *ea_inode;
	struct ext4_xattr_entry *entry;
	bool dirty = false;
	unsigned int ea_ino;
	int err;
	int credits;
	/* One credit for dec ref on ea_inode, one for orphan list addition. */
	credits = 2 + extra_credits;

	for (entry = first; !IS_LAST_ENTRY(entry);
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err)
			continue;

		err = ext4_expand_inode_array(ea_inode_array, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode,
					   "Expand inode array err=%d", err);
			iput(ea_inode);
			continue;
		}

		err = ext4_journal_ensure_credits_fn(handle, credits, credits,
			ext4_free_metadata_revoke_credits(parent->i_sb, 1),
			ext4_xattr_restart_fn(handle, parent, bh, block_csum,
					      dirty));
		if (err < 0) {
			ext4_warning_inode(ea_inode, "Ensure credits err=%d",
					   err);
			continue;
		}
		if (err > 0) {
			err = ext4_journal_get_write_access(handle, bh);
			if (err) {
				ext4_warning_inode(ea_inode,
						   "Re-get write access err=%d",
						   err);
				continue;
			}
		}

		err = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode, "ea_inode dec ref err=%d",
					   err);
			continue;
		}

		if (!skip_quota)
			ext4_xattr_inode_free_quota(parent, ea_inode,
					      le32_to_cpu(entry->e_value_size));

		/*
		 * Forget about ea_inode within the same transaction that
		 * decrements the ref count. This avoids duplicate decrements in
		 * case the rest of the work spills over to subsequent
		 * transactions.
		 */
		entry->e_value_inum = 0;
		entry->e_value_size = 0;

		dirty = true;
	}

	if (dirty) {
		/*
		 * Note that we are deliberately skipping csum calculation for
		 * the final update because we do not expect any journal
		 * restarts until xattr block is freed.
		 */

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (err)
			ext4_warning_inode(parent,
					   "handle dirty metadata err=%d", err);
	}
}

/*
 * Release the xattr block BH: If the reference count is > 1, decrement it;
 * otherwise free the block.
 */
static void
ext4_xattr_release_block(handle_t *handle, struct inode *inode,
			 struct buffer_head *bh,
			 struct ext4_xattr_inode_array **ea_inode_array,
			 int extra_credits)
{
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
	u32 hash, ref;
	int error = 0;

	BUFFER_TRACE(bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, bh);
	if (error)
		goto out;

	lock_buffer(bh);
	hash = le32_to_cpu(BHDR(bh)->h_hash);
	ref = le32_to_cpu(BHDR(bh)->h_refcount);
	if (ref == 1) {
		ea_bdebug(bh, "refcount now=0; freeing");
		/*
		 * This must happen under buffer lock for
		 * ext4_xattr_block_set() to reliably detect freed block
		 */
		if (ea_block_cache)
			mb_cache_entry_delete(ea_block_cache, hash,
					      bh->b_blocknr);
		get_bh(bh);
		unlock_buffer(bh);

		if (ext4_has_feature_ea_inode(inode->i_sb))
			ext4_xattr_inode_dec_ref_all(handle, inode, bh,
						     BFIRST(bh),
						     true /* block_csum */,
						     ea_inode_array,
						     extra_credits,
						     true /* skip_quota */);
		ext4_free_blocks(handle, inode, bh, 0, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	} else {
		ref--;
		BHDR(bh)->h_refcount = cpu_to_le32(ref);
		if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
			struct mb_cache_entry *ce;

			if (ea_block_cache) {
				ce = mb_cache_entry_get(ea_block_cache, hash,
							bh->b_blocknr);
				if (ce) {
					ce->e_reusable = 1;
					mb_cache_entry_put(ea_block_cache, ce);
				}
			}
		}

		ext4_xattr_block_csum_set(inode, bh);
		/*
		 * Beware of this ugliness: Releasing of xattr block references
		 * from different inodes can race and so we have to protect
		 * from a race where someone else frees the block (and releases
		 * its journal_head) before we are done dirtying the buffer. In
		 * nojournal mode this race is harmless and we actually cannot
		 * call ext4_handle_dirty_metadata() with locked buffer as
		 * that function can call sync_dirty_buffer() so for that case
		 * we handle the dirtying after unlocking the buffer.
		 */
		if (ext4_handle_valid(handle))
			error = ext4_handle_dirty_metadata(handle, inode, bh);
		unlock_buffer(bh);
		if (!ext4_handle_valid(handle))
			error = ext4_handle_dirty_metadata(handle, inode, bh);
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
		ea_bdebug(bh, "refcount now=%d; releasing",
			  le32_to_cpu(BHDR(bh)->h_refcount));
	}
out:
	ext4_std_error(inode->i_sb, error);
	return;
}

/*
 * Find the available free space for EAs. This also returns the total number of
 * bytes used by EA entries.
 */
static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
				    size_t *min_offs, void *base, int *total)
{
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_inum && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < *min_offs)
				*min_offs = offs;
		}
		if (total)
			*total += EXT4_XATTR_LEN(last->e_name_len);
	}
	return (*min_offs - ((void *)last - base) - sizeof(__u32));
}
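
/*
 * Worked example (editor's note, not part of the original file): if the
 * entry descriptors end 200 bytes past 'base' and the lowest-placed value
 * starts at offset 3000, the space still available is
 * 3000 - 200 - 4 == 2796 bytes, where the 4 bytes are reserved for the
 * terminating four null bytes shown in the layout diagram at the top of
 * this file.
 */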

/*
 * Write the value of the EA in an inode.
 */
static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
				  const void *buf, int bufsize)
{
	struct buffer_head *bh = NULL;
	unsigned long block = 0;
	int blocksize = ea_inode->i_sb->s_blocksize;
	int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
	int csize, wsize = 0;
	int ret = 0, ret2 = 0;
	int retries = 0;

retry:
	while (ret >= 0 && ret < max_blocks) {
		struct ext4_map_blocks map;
		map.m_lblk = block += ret;
		map.m_len = max_blocks -= ret;

		ret = ext4_map_blocks(handle, ea_inode, &map,
				      EXT4_GET_BLOCKS_CREATE);
		if (ret <= 0) {
			ext4_mark_inode_dirty(handle, ea_inode);
			if (ret == -ENOSPC &&
			    ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
				ret = 0;
				goto retry;
			}
			break;
		}
	}

	if (ret < 0)
		return ret;

	block = 0;
	while (wsize < bufsize) {
		brelse(bh);
		csize = (bufsize - wsize) > blocksize ? blocksize :
							bufsize - wsize;
		bh = ext4_getblk(handle, ea_inode, block, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh) {
			WARN_ON_ONCE(1);
			EXT4_ERROR_INODE(ea_inode,
					 "ext4_getblk() return bh = NULL");
			return -EFSCORRUPTED;
		}
		ret = ext4_journal_get_write_access(handle, bh);
		if (ret)
			goto out;

		memcpy(bh->b_data, buf, csize);
		set_buffer_uptodate(bh);
		ext4_handle_dirty_metadata(handle, ea_inode, bh);

		buf += csize;
		wsize += csize;
		block += 1;
	}

	inode_lock(ea_inode);
	i_size_write(ea_inode, wsize);
	ext4_update_i_disksize(ea_inode, wsize);
	inode_unlock(ea_inode);

	ret2 = ext4_mark_inode_dirty(handle, ea_inode);
	if (unlikely(ret2 && !ret))
		ret = ret2;

out:
	brelse(bh);

	return ret;
}

/*
 * Create an inode to store the value of a large EA.
 */
static struct inode *ext4_xattr_inode_create(handle_t *handle,
					     struct inode *inode, u32 hash)
{
	struct inode *ea_inode = NULL;
	uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
	int err;

	/*
	 * Let the next inode be the goal, so we try and allocate the EA inode
	 * in the same group, or nearby one.
	 */
	ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				  S_IFREG | 0600, NULL, inode->i_ino + 1, owner,
				  EXT4_EA_INODE_FL);
	if (!IS_ERR(ea_inode)) {
		ea_inode->i_op = &ext4_file_inode_operations;
		ea_inode->i_fop = &ext4_file_operations;
		ext4_set_aops(ea_inode);
		ext4_xattr_inode_set_class(ea_inode);
		unlock_new_inode(ea_inode);
		ext4_xattr_inode_set_ref(ea_inode, 1);
		ext4_xattr_inode_set_hash(ea_inode, hash);
		err = ext4_mark_inode_dirty(handle, ea_inode);
		if (!err)
			err = ext4_inode_attach_jinode(ea_inode);
		if (err) {
			iput(ea_inode);
			return ERR_PTR(err);
		}

		/*
		 * Xattr inodes are shared therefore quota charging is performed
		 * at a higher level.
		 */
		dquot_free_inode(ea_inode);
		dquot_drop(ea_inode);
		inode_lock(ea_inode);
		ea_inode->i_flags |= S_NOQUOTA;
		inode_unlock(ea_inode);
	}

	return ea_inode;
}

static struct inode *
ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
			    size_t value_len, u32 hash)
{
	struct inode *ea_inode;
	struct mb_cache_entry *ce;
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
	void *ea_data;

	if (!ea_inode_cache)
		return NULL;

	ce = mb_cache_entry_find_first(ea_inode_cache, hash);
	if (!ce)
		return NULL;

	WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
		     !(current->flags & PF_MEMALLOC_NOFS));

	ea_data = kvmalloc(value_len, GFP_KERNEL);
	if (!ea_data) {
		mb_cache_entry_put(ea_inode_cache, ce);
		return NULL;
	}

	while (ce) {
		ea_inode = ext4_iget(inode->i_sb, ce->e_value,
				     EXT4_IGET_NORMAL);
		if (!IS_ERR(ea_inode) &&
		    !is_bad_inode(ea_inode) &&
		    (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
		    i_size_read(ea_inode) == value_len &&
		    !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
		    !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
						    value_len) &&
		    !memcmp(value, ea_data, value_len)) {
			mb_cache_entry_touch(ea_inode_cache, ce);
			mb_cache_entry_put(ea_inode_cache, ce);
			kvfree(ea_data);
			return ea_inode;
		}

		if (!IS_ERR(ea_inode))
			iput(ea_inode);
		ce = mb_cache_entry_find_next(ea_inode_cache, ce);
	}
	kvfree(ea_data);
	return NULL;
}

/*
 * Store the value of an EA in an inode: reuse a cached EA inode holding an
 * identical value if one exists, otherwise create a new one.
 */
| 1499 | static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode, |
| 1500 | const void *value, size_t value_len, |
| 1501 | struct inode **ret_inode) |
| 1502 | { |
| 1503 | struct inode *ea_inode; |
| 1504 | u32 hash; |
| 1505 | int err; |
| 1506 | |
| 1507 | hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len); |
| 1508 | ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash); |
| 1509 | if (ea_inode) { |
| 1510 | err = ext4_xattr_inode_inc_ref(handle, ea_inode); |
| 1511 | if (err) { |
| 1512 | iput(ea_inode); |
| 1513 | return err; |
| 1514 | } |
| 1515 | |
| 1516 | *ret_inode = ea_inode; |
| 1517 | return 0; |
| 1518 | } |
| 1519 | |
| 1520 | /* Create an inode for the EA value */ |
| 1521 | ea_inode = ext4_xattr_inode_create(handle, inode, hash); |
| 1522 | if (IS_ERR(ea_inode)) |
| 1523 | return PTR_ERR(ea_inode); |
| 1524 | |
| 1525 | err = ext4_xattr_inode_write(handle, ea_inode, value, value_len); |
| 1526 | if (err) { |
| 1527 | ext4_xattr_inode_dec_ref(handle, ea_inode); |
| 1528 | iput(ea_inode); |
| 1529 | return err; |
| 1530 | } |
| 1531 | |
| 1532 | if (EA_INODE_CACHE(inode)) |
| 1533 | mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash, |
| 1534 | ea_inode->i_ino, true /* reusable */); |
| 1535 | |
| 1536 | *ret_inode = ea_inode; |
| 1537 | return 0; |
| 1538 | } |
| 1539 | |
| 1540 | /* |
| 1541 | * Reserve min(block_size/8, 1024) bytes for xattr entries/names if ea_inode |
| 1542 | * feature is enabled. |
| 1543 | */ |
| 1544 | #define EXT4_XATTR_BLOCK_RESERVE(inode) min(i_blocksize(inode)/8, 1024U) |
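| | /* |
| | * For example, with the common 4KiB block size this reserves |
| | * min(4096 / 8, 1024) = 512 bytes, and with a 1KiB block size |
| | * min(1024 / 8, 1024) = 128 bytes, for entry descriptors and names. |
| | */ |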
| 1545 | |
| 1546 | static int ext4_xattr_set_entry(struct ext4_xattr_info *i, |
| 1547 | struct ext4_xattr_search *s, |
| 1548 | handle_t *handle, struct inode *inode, |
| 1549 | bool is_block) |
| 1550 | { |
| 1551 | struct ext4_xattr_entry *last, *next; |
| 1552 | struct ext4_xattr_entry *here = s->here; |
| 1553 | size_t min_offs = s->end - s->base, name_len = strlen(i->name); |
| 1554 | int in_inode = i->in_inode; |
| 1555 | struct inode *old_ea_inode = NULL; |
| 1556 | struct inode *new_ea_inode = NULL; |
| 1557 | size_t old_size, new_size; |
| 1558 | int ret; |
| 1559 | |
| 1560 | /* Space used by old and new values. */ |
| 1561 | old_size = (!s->not_found && !here->e_value_inum) ? |
| 1562 | EXT4_XATTR_SIZE(le32_to_cpu(here->e_value_size)) : 0; |
| 1563 | new_size = (i->value && !in_inode) ? EXT4_XATTR_SIZE(i->value_len) : 0; |
| 1564 | |
| 1565 | /* |
| 1566 | * Optimization for the simple case when old and new values have the |
| 1567 | * same padded sizes. Not applicable if external inodes are involved. |
| 1568 | */ |
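| | /* |
| | * Illustrative case: EXT4_XATTR_SIZE() pads values to a 4-byte |
| | * boundary, so replacing a 5-byte value with a 7-byte one (both |
| | * pad to 8 bytes) takes this in-place path and moves nothing else. |
| | */ |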
| 1569 | if (new_size && new_size == old_size) { |
| 1570 | size_t offs = le16_to_cpu(here->e_value_offs); |
| 1571 | void *val = s->base + offs; |
| 1572 | |
| 1573 | here->e_value_size = cpu_to_le32(i->value_len); |
| 1574 | if (i->value == EXT4_ZERO_XATTR_VALUE) { |
| 1575 | memset(val, 0, new_size); |
| 1576 | } else { |
| 1577 | memcpy(val, i->value, i->value_len); |
| 1578 | /* Clear padding bytes. */ |
| 1579 | memset(val + i->value_len, 0, new_size - i->value_len); |
| 1580 | } |
| 1581 | goto update_hash; |
| 1582 | } |
| 1583 | |
| 1584 | /* Compute min_offs and last. */ |
| 1585 | last = s->first; |
| 1586 | for (; !IS_LAST_ENTRY(last); last = next) { |
| 1587 | next = EXT4_XATTR_NEXT(last); |
| 1588 | if ((void *)next >= s->end) { |
| 1589 | EXT4_ERROR_INODE(inode, "corrupted xattr entries"); |
| 1590 | ret = -EFSCORRUPTED; |
| 1591 | goto out; |
| 1592 | } |
| 1593 | if (!last->e_value_inum && last->e_value_size) { |
| 1594 | size_t offs = le16_to_cpu(last->e_value_offs); |
| 1595 | if (offs < min_offs) |
| 1596 | min_offs = offs; |
| 1597 | } |
| 1598 | } |
| 1599 | |
| 1600 | /* Check whether we have enough space. */ |
| 1601 | if (i->value) { |
| 1602 | size_t free; |
| 1603 | |
| 1604 | free = min_offs - ((void *)last - s->base) - sizeof(__u32); |
| 1605 | if (!s->not_found) |
| 1606 | free += EXT4_XATTR_LEN(name_len) + old_size; |
| 1607 | |
| 1608 | if (free < EXT4_XATTR_LEN(name_len) + new_size) { |
| 1609 | ret = -ENOSPC; |
| 1610 | goto out; |
| 1611 | } |
| 1612 | |
| 1613 | /* |
| 1614 | * If storing the value in an external inode is an option, |
| 1615 | * reserve space for xattr entries/names in the external |
| 1616 | * attribute block so that a long value does not occupy the |
| 1617 | * whole space and prevent further entries from being added. |
| 1618 | */ |
| 1619 | if (ext4_has_feature_ea_inode(inode->i_sb) && |
| 1620 | new_size && is_block && |
| 1621 | (min_offs + old_size - new_size) < |
| 1622 | EXT4_XATTR_BLOCK_RESERVE(inode)) { |
| 1623 | ret = -ENOSPC; |
| 1624 | goto out; |
| 1625 | } |
| 1626 | } |
| 1627 | |
| 1628 | /* |
| 1629 | * Getting access to old and new ea inodes is subject to failures. |
| 1630 | * Finish that work before doing any modifications to the xattr data. |
| 1631 | */ |
| 1632 | if (!s->not_found && here->e_value_inum) { |
| 1633 | ret = ext4_xattr_inode_iget(inode, |
| 1634 | le32_to_cpu(here->e_value_inum), |
| 1635 | le32_to_cpu(here->e_hash), |
| 1636 | &old_ea_inode); |
| 1637 | if (ret) { |
| 1638 | old_ea_inode = NULL; |
| 1639 | goto out; |
| 1640 | } |
| 1641 | } |
| 1642 | if (i->value && in_inode) { |
| 1643 | WARN_ON_ONCE(!i->value_len); |
| 1644 | |
| 1645 | ret = ext4_xattr_inode_alloc_quota(inode, i->value_len); |
| 1646 | if (ret) |
| 1647 | goto out; |
| 1648 | |
| 1649 | ret = ext4_xattr_inode_lookup_create(handle, inode, i->value, |
| 1650 | i->value_len, |
| 1651 | &new_ea_inode); |
| 1652 | if (ret) { |
| 1653 | new_ea_inode = NULL; |
| 1654 | ext4_xattr_inode_free_quota(inode, NULL, i->value_len); |
| 1655 | goto out; |
| 1656 | } |
| 1657 | } |
| 1658 | |
| 1659 | if (old_ea_inode) { |
| 1660 | /* We are ready to release ref count on the old_ea_inode. */ |
| 1661 | ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode); |
| 1662 | if (ret) { |
| 1663 | /* Release the newly acquired ref count on new_ea_inode. */ |
| 1664 | if (new_ea_inode) { |
| 1665 | int err; |
| 1666 | |
| 1667 | err = ext4_xattr_inode_dec_ref(handle, |
| 1668 | new_ea_inode); |
| 1669 | if (err) |
| 1670 | ext4_warning_inode(new_ea_inode, |
| 1671 | "dec ref new_ea_inode err=%d", |
| 1672 | err); |
| 1673 | ext4_xattr_inode_free_quota(inode, new_ea_inode, |
| 1674 | i->value_len); |
| 1675 | } |
| 1676 | goto out; |
| 1677 | } |
| 1678 | |
| 1679 | ext4_xattr_inode_free_quota(inode, old_ea_inode, |
| 1680 | le32_to_cpu(here->e_value_size)); |
| 1681 | } |
| 1682 | |
| 1683 | /* No failures allowed past this point. */ |
| 1684 | |
| 1685 | if (!s->not_found && here->e_value_size && !here->e_value_inum) { |
| 1686 | /* Remove the old value. */ |
| 1687 | void *first_val = s->base + min_offs; |
| 1688 | size_t offs = le16_to_cpu(here->e_value_offs); |
| 1689 | void *val = s->base + offs; |
| 1690 | |
| 1691 | memmove(first_val + old_size, first_val, val - first_val); |
| 1692 | memset(first_val, 0, old_size); |
| 1693 | min_offs += old_size; |
| 1694 | |
| 1695 | /* Adjust all value offsets. */ |
| 1696 | last = s->first; |
| 1697 | while (!IS_LAST_ENTRY(last)) { |
| 1698 | size_t o = le16_to_cpu(last->e_value_offs); |
| 1699 | |
| 1700 | if (!last->e_value_inum && |
| 1701 | last->e_value_size && o < offs) |
| 1702 | last->e_value_offs = cpu_to_le16(o + old_size); |
| 1703 | last = EXT4_XATTR_NEXT(last); |
| 1704 | } |
| 1705 | } |
| 1706 | |
| 1707 | if (!i->value) { |
| 1708 | /* Remove old name. */ |
| 1709 | size_t size = EXT4_XATTR_LEN(name_len); |
| 1710 | |
| 1711 | last = ENTRY((void *)last - size); |
| 1712 | memmove(here, (void *)here + size, |
| 1713 | (void *)last - (void *)here + sizeof(__u32)); |
| 1714 | memset(last, 0, size); |
| 1715 | } else if (s->not_found) { |
| 1716 | /* Insert new name. */ |
| 1717 | size_t size = EXT4_XATTR_LEN(name_len); |
| 1718 | size_t rest = (void *)last - (void *)here + sizeof(__u32); |
| 1719 | |
| 1720 | memmove((void *)here + size, here, rest); |
| 1721 | memset(here, 0, size); |
| 1722 | here->e_name_index = i->name_index; |
| 1723 | here->e_name_len = name_len; |
| 1724 | memcpy(here->e_name, i->name, name_len); |
| 1725 | } else { |
| 1726 | /* This is an update, reset value info. */ |
| 1727 | here->e_value_inum = 0; |
| 1728 | here->e_value_offs = 0; |
| 1729 | here->e_value_size = 0; |
| 1730 | } |
| 1731 | |
| 1732 | if (i->value) { |
| 1733 | /* Insert new value. */ |
| 1734 | if (in_inode) { |
| 1735 | here->e_value_inum = cpu_to_le32(new_ea_inode->i_ino); |
| 1736 | } else if (i->value_len) { |
| 1737 | void *val = s->base + min_offs - new_size; |
| 1738 | |
| 1739 | here->e_value_offs = cpu_to_le16(min_offs - new_size); |
| 1740 | if (i->value == EXT4_ZERO_XATTR_VALUE) { |
| 1741 | memset(val, 0, new_size); |
| 1742 | } else { |
| 1743 | memcpy(val, i->value, i->value_len); |
| 1744 | /* Clear padding bytes. */ |
| 1745 | memset(val + i->value_len, 0, |
| 1746 | new_size - i->value_len); |
| 1747 | } |
| 1748 | } |
| 1749 | here->e_value_size = cpu_to_le32(i->value_len); |
| 1750 | } |
| 1751 | |
| 1752 | update_hash: |
| 1753 | if (i->value) { |
| 1754 | __le32 hash = 0; |
| 1755 | |
| 1756 | /* Entry hash calculation. */ |
| 1757 | if (in_inode) { |
| 1758 | __le32 crc32c_hash; |
| 1759 | |
| 1760 | /* |
| 1761 | * Feed crc32c hash instead of the raw value for entry |
| 1762 | * hash calculation. This avoids walking a |
| 1763 | * potentially long value buffer again. |
| 1764 | */ |
| 1765 | crc32c_hash = cpu_to_le32( |
| 1766 | ext4_xattr_inode_get_hash(new_ea_inode)); |
| 1767 | hash = ext4_xattr_hash_entry(here->e_name, |
| 1768 | here->e_name_len, |
| 1769 | &crc32c_hash, 1); |
| 1770 | } else if (is_block) { |
| 1771 | __le32 *value = s->base + le16_to_cpu( |
| 1772 | here->e_value_offs); |
| 1773 | |
| 1774 | hash = ext4_xattr_hash_entry(here->e_name, |
| 1775 | here->e_name_len, value, |
| 1776 | new_size >> 2); |
| 1777 | } |
| 1778 | here->e_hash = hash; |
| 1779 | } |
| 1780 | |
| 1781 | if (is_block) |
| 1782 | ext4_xattr_rehash((struct ext4_xattr_header *)s->base); |
| 1783 | |
| 1784 | ret = 0; |
| 1785 | out: |
| 1786 | iput(old_ea_inode); |
| 1787 | iput(new_ea_inode); |
| 1788 | return ret; |
| 1789 | } |
| 1790 | |
| 1791 | struct ext4_xattr_block_find { |
| 1792 | struct ext4_xattr_search s; |
| 1793 | struct buffer_head *bh; |
| 1794 | }; |
| 1795 | |
| 1796 | static int |
| 1797 | ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, |
| 1798 | struct ext4_xattr_block_find *bs) |
| 1799 | { |
| 1800 | struct super_block *sb = inode->i_sb; |
| 1801 | int error; |
| 1802 | |
| 1803 | ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld", |
| 1804 | i->name_index, i->name, i->value, (long)i->value_len); |
| 1805 | |
| 1806 | if (EXT4_I(inode)->i_file_acl) { |
| 1807 | /* The inode already has an extended attribute block. */ |
| 1808 | bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); |
| 1809 | if (IS_ERR(bs->bh)) { |
| 1810 | error = PTR_ERR(bs->bh); |
| 1811 | bs->bh = NULL; |
| 1812 | return error; |
| 1813 | } |
| 1814 | ea_bdebug(bs->bh, "b_count=%d, refcount=%d", |
| 1815 | atomic_read(&(bs->bh->b_count)), |
| 1816 | le32_to_cpu(BHDR(bs->bh)->h_refcount)); |
| 1817 | error = ext4_xattr_check_block(inode, bs->bh); |
| 1818 | if (error) |
| 1819 | return error; |
| 1820 | /* Find the named attribute. */ |
| 1821 | bs->s.base = BHDR(bs->bh); |
| 1822 | bs->s.first = BFIRST(bs->bh); |
| 1823 | bs->s.end = bs->bh->b_data + bs->bh->b_size; |
| 1824 | bs->s.here = bs->s.first; |
| 1825 | error = xattr_find_entry(inode, &bs->s.here, bs->s.end, |
| 1826 | i->name_index, i->name, 1); |
| 1827 | if (error && error != -ENODATA) |
| 1828 | return error; |
| 1829 | bs->s.not_found = error; |
| 1830 | } |
| 1831 | return 0; |
| 1832 | } |
| 1833 | |
| 1834 | static int |
| 1835 | ext4_xattr_block_set(handle_t *handle, struct inode *inode, |
| 1836 | struct ext4_xattr_info *i, |
| 1837 | struct ext4_xattr_block_find *bs) |
| 1838 | { |
| 1839 | struct super_block *sb = inode->i_sb; |
| 1840 | struct buffer_head *new_bh = NULL; |
| 1841 | struct ext4_xattr_search s_copy = bs->s; |
| 1842 | struct ext4_xattr_search *s = &s_copy; |
| 1843 | struct mb_cache_entry *ce = NULL; |
| 1844 | int error = 0; |
| 1845 | struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); |
| 1846 | struct inode *ea_inode = NULL, *tmp_inode; |
| 1847 | size_t old_ea_inode_quota = 0; |
| 1848 | unsigned int ea_ino; |
| 1849 | |
| 1850 | |
| 1851 | #define header(x) ((struct ext4_xattr_header *)(x)) |
| 1852 | |
| 1853 | if (s->base) { |
| 1854 | BUFFER_TRACE(bs->bh, "get_write_access"); |
| 1855 | error = ext4_journal_get_write_access(handle, bs->bh); |
| 1856 | if (error) |
| 1857 | goto cleanup; |
| 1858 | lock_buffer(bs->bh); |
| 1859 | |
| 1860 | if (header(s->base)->h_refcount == cpu_to_le32(1)) { |
| 1861 | __u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash); |
| 1862 | |
| 1863 | /* |
| 1864 | * This must happen under buffer lock for |
| 1865 | * ext4_xattr_block_set() to reliably detect a |
| 1866 | * modified block. |
| 1867 | */ |
| 1868 | if (ea_block_cache) |
| 1869 | mb_cache_entry_delete(ea_block_cache, hash, |
| 1870 | bs->bh->b_blocknr); |
| 1871 | ea_bdebug(bs->bh, "modifying in-place"); |
| 1872 | error = ext4_xattr_set_entry(i, s, handle, inode, |
| 1873 | true /* is_block */); |
| 1874 | ext4_xattr_block_csum_set(inode, bs->bh); |
| 1875 | unlock_buffer(bs->bh); |
| 1876 | if (error == -EFSCORRUPTED) |
| 1877 | goto bad_block; |
| 1878 | if (!error) |
| 1879 | error = ext4_handle_dirty_metadata(handle, |
| 1880 | inode, |
| 1881 | bs->bh); |
| 1882 | if (error) |
| 1883 | goto cleanup; |
| 1884 | goto inserted; |
| 1885 | } else { |
| 1886 | int offset = (char *)s->here - bs->bh->b_data; |
| 1887 | |
| 1888 | unlock_buffer(bs->bh); |
| 1889 | ea_bdebug(bs->bh, "cloning"); |
| 1890 | s->base = kmalloc(bs->bh->b_size, GFP_NOFS); |
| 1891 | error = -ENOMEM; |
| 1892 | if (s->base == NULL) |
| 1893 | goto cleanup; |
| 1894 | memcpy(s->base, BHDR(bs->bh), bs->bh->b_size); |
| 1895 | s->first = ENTRY(header(s->base)+1); |
| 1896 | header(s->base)->h_refcount = cpu_to_le32(1); |
| 1897 | s->here = ENTRY(s->base + offset); |
| 1898 | s->end = s->base + bs->bh->b_size; |
| 1899 | |
| 1900 | /* |
| 1901 | * If existing entry points to an xattr inode, we need |
| 1902 | * to prevent ext4_xattr_set_entry() from decrementing |
| 1903 | * ref count on it because the reference belongs to the |
| 1904 | * original block. In this case, make the entry look |
| 1905 | * like it has an empty value. |
| 1906 | */ |
| 1907 | if (!s->not_found && s->here->e_value_inum) { |
| 1908 | ea_ino = le32_to_cpu(s->here->e_value_inum); |
| 1909 | error = ext4_xattr_inode_iget(inode, ea_ino, |
| 1910 | le32_to_cpu(s->here->e_hash), |
| 1911 | &tmp_inode); |
| 1912 | if (error) |
| 1913 | goto cleanup; |
| 1914 | |
| 1915 | if (!ext4_test_inode_state(tmp_inode, |
| 1916 | EXT4_STATE_LUSTRE_EA_INODE)) { |
| 1917 | /* |
| 1918 | * Defer quota free call for previous |
| 1919 | * inode until success is guaranteed. |
| 1920 | */ |
| 1921 | old_ea_inode_quota = le32_to_cpu( |
| 1922 | s->here->e_value_size); |
| 1923 | } |
| 1924 | iput(tmp_inode); |
| 1925 | |
| 1926 | s->here->e_value_inum = 0; |
| 1927 | s->here->e_value_size = 0; |
| 1928 | } |
| 1929 | } |
| 1930 | } else { |
| 1931 | /* Allocate a buffer where we construct the new block. */ |
| 1932 | s->base = kzalloc(sb->s_blocksize, GFP_NOFS); |
| 1933 | /* assert(header == s->base) */ |
| 1934 | error = -ENOMEM; |
| 1935 | if (s->base == NULL) |
| 1936 | goto cleanup; |
| 1937 | header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); |
| 1938 | header(s->base)->h_blocks = cpu_to_le32(1); |
| 1939 | header(s->base)->h_refcount = cpu_to_le32(1); |
| 1940 | s->first = ENTRY(header(s->base)+1); |
| 1941 | s->here = ENTRY(header(s->base)+1); |
| 1942 | s->end = s->base + sb->s_blocksize; |
| 1943 | } |
| 1944 | |
| 1945 | error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */); |
| 1946 | if (error == -EFSCORRUPTED) |
| 1947 | goto bad_block; |
| 1948 | if (error) |
| 1949 | goto cleanup; |
| 1950 | |
| 1951 | if (i->value && s->here->e_value_inum) { |
| 1952 | /* |
| 1953 | * A ref count on ea_inode has been taken as part of the call to |
| 1954 | * ext4_xattr_set_entry() above. We would like to drop this |
| 1955 | * extra ref but we have to wait until the xattr block is |
| 1956 | * initialized and has its own ref count on the ea_inode. |
| 1957 | */ |
| 1958 | ea_ino = le32_to_cpu(s->here->e_value_inum); |
| 1959 | error = ext4_xattr_inode_iget(inode, ea_ino, |
| 1960 | le32_to_cpu(s->here->e_hash), |
| 1961 | &ea_inode); |
| 1962 | if (error) { |
| 1963 | ea_inode = NULL; |
| 1964 | goto cleanup; |
| 1965 | } |
| 1966 | } |
| 1967 | |
| 1968 | inserted: |
| 1969 | if (!IS_LAST_ENTRY(s->first)) { |
| 1970 | new_bh = ext4_xattr_block_cache_find(inode, header(s->base), |
| 1971 | &ce); |
| 1972 | if (new_bh) { |
| 1973 | /* We found an identical block in the cache. */ |
| 1974 | if (new_bh == bs->bh) |
| 1975 | ea_bdebug(new_bh, "keeping"); |
| 1976 | else { |
| 1977 | u32 ref; |
| 1978 | |
| 1979 | WARN_ON_ONCE(dquot_initialize_needed(inode)); |
| 1980 | |
| 1981 | /* The old block is released after updating |
| 1982 | the inode. */ |
| 1983 | error = dquot_alloc_block(inode, |
| 1984 | EXT4_C2B(EXT4_SB(sb), 1)); |
| 1985 | if (error) |
| 1986 | goto cleanup; |
| 1987 | BUFFER_TRACE(new_bh, "get_write_access"); |
| 1988 | error = ext4_journal_get_write_access(handle, |
| 1989 | new_bh); |
| 1990 | if (error) |
| 1991 | goto cleanup_dquot; |
| 1992 | lock_buffer(new_bh); |
| 1993 | /* |
| 1994 | * We have to be careful about races with |
| 1995 | * freeing, rehashing or adding references to |
| 1996 | * xattr block. Once we hold buffer lock xattr |
| 1997 | * block's state is stable so we can check |
| 1998 | * whether the block got freed / rehashed or |
| 1999 | * not. Since we unhash mbcache entry under |
| 2000 | * buffer lock when freeing / rehashing xattr |
| 2001 | * block, checking whether entry is still |
| 2002 | * hashed is reliable. Same rules hold for |
| 2003 | * e_reusable handling. |
| 2004 | */ |
| 2005 | if (hlist_bl_unhashed(&ce->e_hash_list) || |
| 2006 | !ce->e_reusable) { |
| 2007 | /* |
| 2008 | * Undo everything and check mbcache |
| 2009 | * again. |
| 2010 | */ |
| 2011 | unlock_buffer(new_bh); |
| 2012 | dquot_free_block(inode, |
| 2013 | EXT4_C2B(EXT4_SB(sb), |
| 2014 | 1)); |
| 2015 | brelse(new_bh); |
| 2016 | mb_cache_entry_put(ea_block_cache, ce); |
| 2017 | ce = NULL; |
| 2018 | new_bh = NULL; |
| 2019 | goto inserted; |
| 2020 | } |
| 2021 | ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1; |
| 2022 | BHDR(new_bh)->h_refcount = cpu_to_le32(ref); |
| 2023 | if (ref >= EXT4_XATTR_REFCOUNT_MAX) |
| 2024 | ce->e_reusable = 0; |
| 2025 | ea_bdebug(new_bh, "reusing; refcount now=%d", |
| 2026 | ref); |
| 2027 | ext4_xattr_block_csum_set(inode, new_bh); |
| 2028 | unlock_buffer(new_bh); |
| 2029 | error = ext4_handle_dirty_metadata(handle, |
| 2030 | inode, |
| 2031 | new_bh); |
| 2032 | if (error) |
| 2033 | goto cleanup_dquot; |
| 2034 | } |
| 2035 | mb_cache_entry_touch(ea_block_cache, ce); |
| 2036 | mb_cache_entry_put(ea_block_cache, ce); |
| 2037 | ce = NULL; |
| 2038 | } else if (bs->bh && s->base == bs->bh->b_data) { |
| 2039 | /* We were modifying this block in-place. */ |
| 2040 | ea_bdebug(bs->bh, "keeping this block"); |
| 2041 | ext4_xattr_block_cache_insert(ea_block_cache, bs->bh); |
| 2042 | new_bh = bs->bh; |
| 2043 | get_bh(new_bh); |
| 2044 | } else { |
| 2045 | /* We need to allocate a new block */ |
| 2046 | ext4_fsblk_t goal, block; |
| 2047 | |
| 2048 | WARN_ON_ONCE(dquot_initialize_needed(inode)); |
| 2049 | |
| 2050 | goal = ext4_group_first_block_no(sb, |
| 2051 | EXT4_I(inode)->i_block_group); |
| 2052 | |
| 2053 | /* non-extent files can't have physical blocks past 2^32 */ |
| 2054 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
| 2055 | goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; |
| 2056 | |
| 2057 | block = ext4_new_meta_blocks(handle, inode, goal, 0, |
| 2058 | NULL, &error); |
| 2059 | if (error) |
| 2060 | goto cleanup; |
| 2061 | |
| 2062 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
| 2063 | BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS); |
| 2064 | |
| 2065 | ea_idebug(inode, "creating block %llu", |
| 2066 | (unsigned long long)block); |
| 2067 | |
| 2068 | new_bh = sb_getblk(sb, block); |
| 2069 | if (unlikely(!new_bh)) { |
| 2070 | error = -ENOMEM; |
| 2071 | getblk_failed: |
| 2072 | ext4_free_blocks(handle, inode, NULL, block, 1, |
| 2073 | EXT4_FREE_BLOCKS_METADATA); |
| 2074 | goto cleanup; |
| 2075 | } |
| 2076 | error = ext4_xattr_inode_inc_ref_all(handle, inode, |
| 2077 | ENTRY(header(s->base)+1)); |
| 2078 | if (error) |
| 2079 | goto getblk_failed; |
| 2080 | if (ea_inode) { |
| 2081 | /* Drop the extra ref on ea_inode. */ |
| 2082 | error = ext4_xattr_inode_dec_ref(handle, |
| 2083 | ea_inode); |
| 2084 | if (error) |
| 2085 | ext4_warning_inode(ea_inode, |
| 2086 | "dec ref error=%d", |
| 2087 | error); |
| 2088 | iput(ea_inode); |
| 2089 | ea_inode = NULL; |
| 2090 | } |
| 2091 | |
| 2092 | lock_buffer(new_bh); |
| 2093 | error = ext4_journal_get_create_access(handle, new_bh); |
| 2094 | if (error) { |
| 2095 | unlock_buffer(new_bh); |
| 2096 | error = -EIO; |
| 2097 | goto getblk_failed; |
| 2098 | } |
| 2099 | memcpy(new_bh->b_data, s->base, new_bh->b_size); |
| 2100 | ext4_xattr_block_csum_set(inode, new_bh); |
| 2101 | set_buffer_uptodate(new_bh); |
| 2102 | unlock_buffer(new_bh); |
| 2103 | ext4_xattr_block_cache_insert(ea_block_cache, new_bh); |
| 2104 | error = ext4_handle_dirty_metadata(handle, inode, |
| 2105 | new_bh); |
| 2106 | if (error) |
| 2107 | goto cleanup; |
| 2108 | } |
| 2109 | } |
| 2110 | |
| 2111 | if (old_ea_inode_quota) |
| 2112 | ext4_xattr_inode_free_quota(inode, NULL, old_ea_inode_quota); |
| 2113 | |
| 2114 | /* Update the inode. */ |
| 2115 | EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0; |
| 2116 | |
| 2117 | /* Drop the previous xattr block. */ |
| 2118 | if (bs->bh && bs->bh != new_bh) { |
| 2119 | struct ext4_xattr_inode_array *ea_inode_array = NULL; |
| 2120 | |
| 2121 | ext4_xattr_release_block(handle, inode, bs->bh, |
| 2122 | &ea_inode_array, |
| 2123 | 0 /* extra_credits */); |
| 2124 | ext4_xattr_inode_array_free(ea_inode_array); |
| 2125 | } |
| 2126 | error = 0; |
| 2127 | |
| 2128 | cleanup: |
| 2129 | if (ea_inode) { |
| 2130 | int error2; |
| 2131 | |
| 2132 | error2 = ext4_xattr_inode_dec_ref(handle, ea_inode); |
| 2133 | if (error2) |
| 2134 | ext4_warning_inode(ea_inode, "dec ref error=%d", |
| 2135 | error2); |
| 2136 | |
| 2137 | /* If there was an error, revert the quota charge. */ |
| 2138 | if (error) |
| 2139 | ext4_xattr_inode_free_quota(inode, ea_inode, |
| 2140 | i_size_read(ea_inode)); |
| 2141 | iput(ea_inode); |
| 2142 | } |
| 2143 | if (ce) |
| 2144 | mb_cache_entry_put(ea_block_cache, ce); |
| 2145 | brelse(new_bh); |
| 2146 | if (!(bs->bh && s->base == bs->bh->b_data)) |
| 2147 | kfree(s->base); |
| 2148 | |
| 2149 | return error; |
| 2150 | |
| 2151 | cleanup_dquot: |
| 2152 | dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); |
| 2153 | goto cleanup; |
| 2154 | |
| 2155 | bad_block: |
| 2156 | EXT4_ERROR_INODE(inode, "bad block %llu", |
| 2157 | EXT4_I(inode)->i_file_acl); |
| 2158 | goto cleanup; |
| 2159 | |
| 2160 | #undef header |
| 2161 | } |
| 2162 | |
| 2163 | int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, |
| 2164 | struct ext4_xattr_ibody_find *is) |
| 2165 | { |
| 2166 | struct ext4_xattr_ibody_header *header; |
| 2167 | struct ext4_inode *raw_inode; |
| 2168 | int error; |
| 2169 | |
| 2170 | if (EXT4_I(inode)->i_extra_isize == 0) |
| 2171 | return 0; |
| 2172 | raw_inode = ext4_raw_inode(&is->iloc); |
| 2173 | header = IHDR(inode, raw_inode); |
| 2174 | is->s.base = is->s.first = IFIRST(header); |
| 2175 | is->s.here = is->s.first; |
| 2176 | is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; |
| 2177 | if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { |
| 2178 | error = xattr_check_inode(inode, header, is->s.end); |
| 2179 | if (error) |
| 2180 | return error; |
| 2181 | /* Find the named attribute. */ |
| 2182 | error = xattr_find_entry(inode, &is->s.here, is->s.end, |
| 2183 | i->name_index, i->name, 0); |
| 2184 | if (error && error != -ENODATA) |
| 2185 | return error; |
| 2186 | is->s.not_found = error; |
| 2187 | } |
| 2188 | return 0; |
| 2189 | } |
| 2190 | |
| 2191 | int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, |
| 2192 | struct ext4_xattr_info *i, |
| 2193 | struct ext4_xattr_ibody_find *is) |
| 2194 | { |
| 2195 | struct ext4_xattr_ibody_header *header; |
| 2196 | struct ext4_xattr_search *s = &is->s; |
| 2197 | int error; |
| 2198 | |
| 2199 | if (EXT4_I(inode)->i_extra_isize == 0) |
| 2200 | return -ENOSPC; |
| 2201 | error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); |
| 2202 | if (error) |
| 2203 | return error; |
| 2204 | header = IHDR(inode, ext4_raw_inode(&is->iloc)); |
| 2205 | if (!IS_LAST_ENTRY(s->first)) { |
| 2206 | header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); |
| 2207 | ext4_set_inode_state(inode, EXT4_STATE_XATTR); |
| 2208 | } else { |
| 2209 | header->h_magic = cpu_to_le32(0); |
| 2210 | ext4_clear_inode_state(inode, EXT4_STATE_XATTR); |
| 2211 | } |
| 2212 | return 0; |
| 2213 | } |
| 2214 | |
| 2215 | static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, |
| 2216 | struct ext4_xattr_info *i, |
| 2217 | struct ext4_xattr_ibody_find *is) |
| 2218 | { |
| 2219 | struct ext4_xattr_ibody_header *header; |
| 2220 | struct ext4_xattr_search *s = &is->s; |
| 2221 | int error; |
| 2222 | |
| 2223 | if (EXT4_I(inode)->i_extra_isize == 0) |
| 2224 | return -ENOSPC; |
| 2225 | error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); |
| 2226 | if (error) |
| 2227 | return error; |
| 2228 | header = IHDR(inode, ext4_raw_inode(&is->iloc)); |
| 2229 | if (!IS_LAST_ENTRY(s->first)) { |
| 2230 | header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); |
| 2231 | ext4_set_inode_state(inode, EXT4_STATE_XATTR); |
| 2232 | } else { |
| 2233 | header->h_magic = cpu_to_le32(0); |
| 2234 | ext4_clear_inode_state(inode, EXT4_STATE_XATTR); |
| 2235 | } |
| 2236 | return 0; |
| 2237 | } |
| 2238 | |
| 2239 | static int ext4_xattr_value_same(struct ext4_xattr_search *s, |
| 2240 | struct ext4_xattr_info *i) |
| 2241 | { |
| 2242 | void *value; |
| 2243 | |
| 2244 | /* When e_value_inum is set the value is stored externally. */ |
| 2245 | if (s->here->e_value_inum) |
| 2246 | return 0; |
| 2247 | if (le32_to_cpu(s->here->e_value_size) != i->value_len) |
| 2248 | return 0; |
| 2249 | value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs); |
| 2250 | return !memcmp(value, i->value, i->value_len); |
| 2251 | } |
| 2252 | |
| 2253 | static struct buffer_head *ext4_xattr_get_block(struct inode *inode) |
| 2254 | { |
| 2255 | struct buffer_head *bh; |
| 2256 | int error; |
| 2257 | |
| 2258 | if (!EXT4_I(inode)->i_file_acl) |
| 2259 | return NULL; |
| 2260 | bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); |
| 2261 | if (IS_ERR(bh)) |
| 2262 | return bh; |
| 2263 | error = ext4_xattr_check_block(inode, bh); |
| 2264 | if (error) { |
| 2265 | brelse(bh); |
| 2266 | return ERR_PTR(error); |
| 2267 | } |
| 2268 | return bh; |
| 2269 | } |
| 2270 | |
| 2271 | /* |
| 2272 | * ext4_xattr_set_handle() |
| 2273 | * |
| 2274 | * Create, replace or remove an extended attribute for this inode. Value |
| 2275 | * is NULL to remove an existing extended attribute, and non-NULL to |
| 2276 | * either replace an existing extended attribute, or create a new extended |
| 2277 | * attribute. The XATTR_REPLACE and XATTR_CREATE flags require |
| 2278 | * that the extended attribute already exists, or does not yet |
| 2279 | * exist, prior to the call, respectively. |
| 2280 | * |
| 2281 | * Returns 0, or a negative error number on failure. |
| 2282 | */ |
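| | /* |
| | * Illustrative call from a hypothetical caller: |
| | * |
| | *	err = ext4_xattr_set_handle(handle, inode, |
| | *				    EXT4_XATTR_INDEX_USER, "foo", |
| | *				    buf, buf_len, XATTR_CREATE); |
| | * |
| | * This fails with -EEXIST if "user.foo" is already set; passing a |
| | * NULL value instead removes the attribute. |
| | */ |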
| 2283 | int |
| 2284 | ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, |
| 2285 | const char *name, const void *value, size_t value_len, |
| 2286 | int flags) |
| 2287 | { |
| 2288 | struct ext4_xattr_info i = { |
| 2289 | .name_index = name_index, |
| 2290 | .name = name, |
| 2291 | .value = value, |
| 2292 | .value_len = value_len, |
| 2293 | .in_inode = 0, |
| 2294 | }; |
| 2295 | struct ext4_xattr_ibody_find is = { |
| 2296 | .s = { .not_found = -ENODATA, }, |
| 2297 | }; |
| 2298 | struct ext4_xattr_block_find bs = { |
| 2299 | .s = { .not_found = -ENODATA, }, |
| 2300 | }; |
| 2301 | int no_expand; |
| 2302 | int error; |
| 2303 | |
| 2304 | if (!name) |
| 2305 | return -EINVAL; |
| 2306 | if (strlen(name) > 255) |
| 2307 | return -ERANGE; |
| 2308 | |
| 2309 | ext4_write_lock_xattr(inode, &no_expand); |
| 2310 | |
| 2311 | /* Check journal credits under write lock. */ |
| 2312 | if (ext4_handle_valid(handle)) { |
| 2313 | struct buffer_head *bh; |
| 2314 | int credits; |
| 2315 | |
| 2316 | bh = ext4_xattr_get_block(inode); |
| 2317 | if (IS_ERR(bh)) { |
| 2318 | error = PTR_ERR(bh); |
| 2319 | goto cleanup; |
| 2320 | } |
| 2321 | |
| 2322 | credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh, |
| 2323 | value_len, |
| 2324 | flags & XATTR_CREATE); |
| 2325 | brelse(bh); |
| 2326 | |
| 2327 | if (jbd2_handle_buffer_credits(handle) < credits) { |
| 2328 | error = -ENOSPC; |
| 2329 | goto cleanup; |
| 2330 | } |
| 2331 | WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS)); |
| 2332 | } |
| 2333 | |
| 2334 | error = ext4_reserve_inode_write(handle, inode, &is.iloc); |
| 2335 | if (error) |
| 2336 | goto cleanup; |
| 2337 | |
| 2338 | if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) { |
| 2339 | struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); |
| 2340 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); |
| 2341 | ext4_clear_inode_state(inode, EXT4_STATE_NEW); |
| 2342 | } |
| 2343 | |
| 2344 | error = ext4_xattr_ibody_find(inode, &i, &is); |
| 2345 | if (error) |
| 2346 | goto cleanup; |
| 2347 | if (is.s.not_found) |
| 2348 | error = ext4_xattr_block_find(inode, &i, &bs); |
| 2349 | if (error) |
| 2350 | goto cleanup; |
| 2351 | if (is.s.not_found && bs.s.not_found) { |
| 2352 | error = -ENODATA; |
| 2353 | if (flags & XATTR_REPLACE) |
| 2354 | goto cleanup; |
| 2355 | error = 0; |
| 2356 | if (!value) |
| 2357 | goto cleanup; |
| 2358 | } else { |
| 2359 | error = -EEXIST; |
| 2360 | if (flags & XATTR_CREATE) |
| 2361 | goto cleanup; |
| 2362 | } |
| 2363 | |
| 2364 | if (!value) { |
| 2365 | if (!is.s.not_found) |
| 2366 | error = ext4_xattr_ibody_set(handle, inode, &i, &is); |
| 2367 | else if (!bs.s.not_found) |
| 2368 | error = ext4_xattr_block_set(handle, inode, &i, &bs); |
| 2369 | } else { |
| 2370 | error = 0; |
| 2371 | /* Xattr value did not change? Save us some work and bail out */ |
| 2372 | if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i)) |
| 2373 | goto cleanup; |
| 2374 | if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i)) |
| 2375 | goto cleanup; |
| 2376 | |
| 2377 | if (ext4_has_feature_ea_inode(inode->i_sb) && |
| 2378 | (EXT4_XATTR_SIZE(i.value_len) > |
| 2379 | EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize))) |
| 2380 | i.in_inode = 1; |
| 2381 | retry_inode: |
| 2382 | error = ext4_xattr_ibody_set(handle, inode, &i, &is); |
| 2383 | if (!error && !bs.s.not_found) { |
| 2384 | i.value = NULL; |
| 2385 | error = ext4_xattr_block_set(handle, inode, &i, &bs); |
| 2386 | } else if (error == -ENOSPC) { |
| 2387 | if (EXT4_I(inode)->i_file_acl && !bs.s.base) { |
| 2388 | brelse(bs.bh); |
| 2389 | bs.bh = NULL; |
| 2390 | error = ext4_xattr_block_find(inode, &i, &bs); |
| 2391 | if (error) |
| 2392 | goto cleanup; |
| 2393 | } |
| 2394 | error = ext4_xattr_block_set(handle, inode, &i, &bs); |
| 2395 | if (!error && !is.s.not_found) { |
| 2396 | i.value = NULL; |
| 2397 | error = ext4_xattr_ibody_set(handle, inode, &i, |
| 2398 | &is); |
| 2399 | } else if (error == -ENOSPC) { |
| 2400 | /* |
| 2401 | * Xattr does not fit in the block, store it in an |
| 2402 | * external inode if possible. |
| 2403 | */ |
| 2404 | if (ext4_has_feature_ea_inode(inode->i_sb) && |
| 2405 | i.value_len && !i.in_inode) { |
| 2406 | i.in_inode = 1; |
| 2407 | goto retry_inode; |
| 2408 | } |
| 2409 | } |
| 2410 | } |
| 2411 | } |
| 2412 | if (!error) { |
| 2413 | ext4_xattr_update_super_block(handle, inode->i_sb); |
| 2414 | inode->i_ctime = current_time(inode); |
| 2415 | if (!value) |
| 2416 | no_expand = 0; |
| 2417 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); |
| 2418 | /* |
| 2419 | * The bh is consumed by ext4_mark_iloc_dirty, even with |
| 2420 | * error != 0. |
| 2421 | */ |
| 2422 | is.iloc.bh = NULL; |
| 2423 | if (IS_SYNC(inode)) |
| 2424 | ext4_handle_sync(handle); |
| 2425 | } |
| 2426 | ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR); |
| 2427 | |
| 2428 | cleanup: |
| 2429 | brelse(is.iloc.bh); |
| 2430 | brelse(bs.bh); |
| 2431 | ext4_write_unlock_xattr(inode, &no_expand); |
| 2432 | return error; |
| 2433 | } |
| 2434 | |
| 2435 | int ext4_xattr_set_credits(struct inode *inode, size_t value_len, |
| 2436 | bool is_create, int *credits) |
| 2437 | { |
| 2438 | struct buffer_head *bh; |
| 2439 | int err; |
| 2440 | |
| 2441 | *credits = 0; |
| 2442 | |
| 2443 | if (!EXT4_SB(inode->i_sb)->s_journal) |
| 2444 | return 0; |
| 2445 | |
| 2446 | down_read(&EXT4_I(inode)->xattr_sem); |
| 2447 | |
| 2448 | bh = ext4_xattr_get_block(inode); |
| 2449 | if (IS_ERR(bh)) { |
| 2450 | err = PTR_ERR(bh); |
| 2451 | } else { |
| 2452 | *credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh, |
| 2453 | value_len, is_create); |
| 2454 | brelse(bh); |
| 2455 | err = 0; |
| 2456 | } |
| 2457 | |
| 2458 | up_read(&EXT4_I(inode)->xattr_sem); |
| 2459 | return err; |
| 2460 | } |
| 2461 | |
| 2462 | /* |
| 2463 | * ext4_xattr_set() |
| 2464 | * |
| 2465 | * Like ext4_xattr_set_handle, but start from an inode. This extended |
| 2466 | * attribute modification is a filesystem transaction by itself. |
| 2467 | * |
| 2468 | * Returns 0, or a negative error number on failure. |
| 2469 | */ |
| 2470 | int |
| 2471 | ext4_xattr_set(struct inode *inode, int name_index, const char *name, |
| 2472 | const void *value, size_t value_len, int flags) |
| 2473 | { |
| 2474 | handle_t *handle; |
| 2475 | struct super_block *sb = inode->i_sb; |
| 2476 | int error, retries = 0; |
| 2477 | int credits; |
| 2478 | |
| 2479 | error = dquot_initialize(inode); |
| 2480 | if (error) |
| 2481 | return error; |
| 2482 | |
| 2483 | retry: |
| 2484 | error = ext4_xattr_set_credits(inode, value_len, flags & XATTR_CREATE, |
| 2485 | &credits); |
| 2486 | if (error) |
| 2487 | return error; |
| 2488 | |
| 2489 | handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits); |
| 2490 | if (IS_ERR(handle)) { |
| 2491 | error = PTR_ERR(handle); |
| 2492 | } else { |
| 2493 | int error2; |
| 2494 | |
| 2495 | error = ext4_xattr_set_handle(handle, inode, name_index, name, |
| 2496 | value, value_len, flags); |
| 2497 | error2 = ext4_journal_stop(handle); |
| 2498 | if (error == -ENOSPC && |
| 2499 | ext4_should_retry_alloc(sb, &retries)) |
| 2500 | goto retry; |
| 2501 | if (error == 0) |
| 2502 | error = error2; |
| 2503 | } |
| 2504 | ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR); |
| 2505 | |
| 2506 | return error; |
| 2507 | } |
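| | /* |
| | * Sketch (hypothetical caller): removing the same attribute outside |
| | * of an existing transaction, letting ext4_xattr_set() start and |
| | * stop its own handle: |
| | * |
| | *	err = ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER, "foo", |
| | *			     NULL, 0, XATTR_REPLACE); |
| | */ |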
| 2508 | |
| 2509 | /* |
| 2510 | * Shift the EA entries in the inode to create space for the increased |
| 2511 | * i_extra_isize. |
| 2512 | */ |
| 2513 | static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry, |
| 2514 | int value_offs_shift, void *to, |
| 2515 | void *from, size_t n) |
| 2516 | { |
| 2517 | struct ext4_xattr_entry *last = entry; |
| 2518 | int new_offs; |
| 2519 | |
| 2520 | /* We always shift the xattr entries further in, so value offsets get lower */ |
| 2521 | BUG_ON(value_offs_shift > 0); |
| 2522 | |
| 2523 | /* Adjust the value offsets of the entries */ |
| 2524 | for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { |
| 2525 | if (!last->e_value_inum && last->e_value_size) { |
| 2526 | new_offs = le16_to_cpu(last->e_value_offs) + |
| 2527 | value_offs_shift; |
| 2528 | last->e_value_offs = cpu_to_le16(new_offs); |
| 2529 | } |
| 2530 | } |
| 2531 | /* Shift the entries by n bytes */ |
| 2532 | memmove(to, from, n); |
| 2533 | } |
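| | /* |
| | * Example: growing i_extra_isize by 32 bytes moves the entry table 32 |
| | * bytes towards the end of the inode while the values stay in place, |
| | * so the caller passes value_offs_shift == -32 and every in-inode |
| | * value offset shrinks by 32. |
| | */ |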
| 2534 | |
| 2535 | /* |
| 2536 | * Move xattr pointed to by 'entry' from inode into external xattr block |
| 2537 | */ |
| 2538 | static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, |
| 2539 | struct ext4_inode *raw_inode, |
| 2540 | struct ext4_xattr_entry *entry) |
| 2541 | { |
| 2542 | struct ext4_xattr_ibody_find *is = NULL; |
| 2543 | struct ext4_xattr_block_find *bs = NULL; |
| 2544 | char *buffer = NULL, *b_entry_name = NULL; |
| 2545 | size_t value_size = le32_to_cpu(entry->e_value_size); |
| 2546 | struct ext4_xattr_info i = { |
| 2547 | .value = NULL, |
| 2548 | .value_len = 0, |
| 2549 | .name_index = entry->e_name_index, |
| 2550 | .in_inode = !!entry->e_value_inum, |
| 2551 | }; |
| 2552 | struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode); |
| 2553 | int error; |
| 2554 | |
| 2555 | is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS); |
| 2556 | bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS); |
| 2557 | buffer = kmalloc(value_size, GFP_NOFS); |
| 2558 | b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS); |
| 2559 | if (!is || !bs || !buffer || !b_entry_name) { |
| 2560 | error = -ENOMEM; |
| 2561 | goto out; |
| 2562 | } |
| 2563 | |
| 2564 | is->s.not_found = -ENODATA; |
| 2565 | bs->s.not_found = -ENODATA; |
| 2566 | is->iloc.bh = NULL; |
| 2567 | bs->bh = NULL; |
| 2568 | |
| 2569 | /* Save the entry name and the entry value */ |
| 2570 | if (entry->e_value_inum) { |
| 2571 | error = ext4_xattr_inode_get(inode, entry, buffer, value_size); |
| 2572 | if (error) |
| 2573 | goto out; |
| 2574 | } else { |
| 2575 | size_t value_offs = le16_to_cpu(entry->e_value_offs); |
| 2576 | memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size); |
| 2577 | } |
| 2578 | |
| 2579 | memcpy(b_entry_name, entry->e_name, entry->e_name_len); |
| 2580 | b_entry_name[entry->e_name_len] = '\0'; |
| 2581 | i.name = b_entry_name; |
| 2582 | |
| 2583 | error = ext4_get_inode_loc(inode, &is->iloc); |
| 2584 | if (error) |
| 2585 | goto out; |
| 2586 | |
| 2587 | error = ext4_xattr_ibody_find(inode, &i, is); |
| 2588 | if (error) |
| 2589 | goto out; |
| 2590 | |
| 2591 | /* Remove the chosen entry from the inode */ |
| 2592 | error = ext4_xattr_ibody_set(handle, inode, &i, is); |
| 2593 | if (error) |
| 2594 | goto out; |
| 2595 | |
| 2596 | i.value = buffer; |
| 2597 | i.value_len = value_size; |
| 2598 | error = ext4_xattr_block_find(inode, &i, bs); |
| 2599 | if (error) |
| 2600 | goto out; |
| 2601 | |
| 2602 | /* Add entry which was removed from the inode into the block */ |
| 2603 | error = ext4_xattr_block_set(handle, inode, &i, bs); |
| 2604 | if (error) |
| 2605 | goto out; |
| 2606 | error = 0; |
| 2607 | out: |
| 2608 | kfree(b_entry_name); |
| 2609 | kfree(buffer); |
| 2610 | if (is) |
| 2611 | brelse(is->iloc.bh); |
| 2612 | if (bs) |
| 2613 | brelse(bs->bh); |
| 2614 | kfree(is); |
| 2615 | kfree(bs); |
| 2616 | |
| 2617 | return error; |
| 2618 | } |
| 2619 | |
| 2620 | static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode, |
| 2621 | struct ext4_inode *raw_inode, |
| 2622 | int isize_diff, size_t ifree, |
| 2623 | size_t bfree, int *total_ino) |
| 2624 | { |
| 2625 | struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode); |
| 2626 | struct ext4_xattr_entry *small_entry; |
| 2627 | struct ext4_xattr_entry *entry; |
| 2628 | struct ext4_xattr_entry *last; |
| 2629 | unsigned int entry_size; /* EA entry size */ |
| 2630 | unsigned int total_size; /* EA entry size + value size */ |
| 2631 | unsigned int min_total_size; |
| 2632 | int error; |
| 2633 | |
| 2634 | while (isize_diff > ifree) { |
| 2635 | entry = NULL; |
| 2636 | small_entry = NULL; |
| 2637 | min_total_size = ~0U; |
| 2638 | last = IFIRST(header); |
| 2639 | /* Find the entry best suited to be pushed into EA block */ |
| 2640 | for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { |
| 2641 | /* never move system.data out of the inode */ |
| 2642 | if ((last->e_name_len == 4) && |
| 2643 | (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) && |
| 2644 | !memcmp(last->e_name, "data", 4)) |
| 2645 | continue; |
| 2646 | total_size = EXT4_XATTR_LEN(last->e_name_len); |
| 2647 | if (!last->e_value_inum) |
| 2648 | total_size += EXT4_XATTR_SIZE( |
| 2649 | le32_to_cpu(last->e_value_size)); |
| 2650 | if (total_size <= bfree && |
| 2651 | total_size < min_total_size) { |
| 2652 | if (total_size + ifree < isize_diff) { |
| 2653 | small_entry = last; |
| 2654 | } else { |
| 2655 | entry = last; |
| 2656 | min_total_size = total_size; |
| 2657 | } |
| 2658 | } |
| 2659 | } |
| 2660 | |
| 2661 | if (entry == NULL) { |
| 2662 | if (small_entry == NULL) |
| 2663 | return -ENOSPC; |
| 2664 | entry = small_entry; |
| 2665 | } |
| 2666 | |
| 2667 | entry_size = EXT4_XATTR_LEN(entry->e_name_len); |
| 2668 | total_size = entry_size; |
| 2669 | if (!entry->e_value_inum) |
| 2670 | total_size += EXT4_XATTR_SIZE( |
| 2671 | le32_to_cpu(entry->e_value_size)); |
| 2672 | error = ext4_xattr_move_to_block(handle, inode, raw_inode, |
| 2673 | entry); |
| 2674 | if (error) |
| 2675 | return error; |
| 2676 | |
| 2677 | *total_ino -= entry_size; |
| 2678 | ifree += total_size; |
| 2679 | bfree -= total_size; |
| 2680 | } |
| 2681 | |
| 2682 | return 0; |
| 2683 | } |
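| | /* |
| | * Example of the selection policy above: with isize_diff = 40, |
| | * ifree = 8 and movable entries of total size 16, 48 and 64 bytes, |
| | * the 48-byte entry is moved first, since it is the smallest entry |
| | * that makes ifree (8 + 48) cover the 40-byte deficit in one move. |
| | */ |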
| 2684 | |
| 2685 | /* |
| 2686 | * Expand an inode by new_extra_isize bytes when EAs are present. |
| 2687 | * Returns 0 on success or negative error number on failure. |
| 2688 | */ |
| 2689 | int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, |
| 2690 | struct ext4_inode *raw_inode, handle_t *handle) |
| 2691 | { |
| 2692 | struct ext4_xattr_ibody_header *header; |
| 2693 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 2694 | static unsigned int mnt_count; |
| 2695 | size_t min_offs; |
| 2696 | size_t ifree, bfree; |
| 2697 | int total_ino; |
| 2698 | void *base, *end; |
| 2699 | int error = 0, tried_min_extra_isize = 0; |
| 2700 | int s_min_extra_isize = le16_to_cpu(sbi->s_es->s_min_extra_isize); |
| 2701 | int isize_diff; /* How much do we need to grow i_extra_isize */ |
| 2702 | |
| 2703 | retry: |
| 2704 | isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize; |
| 2705 | if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) |
| 2706 | return 0; |
| 2707 | |
| 2708 | header = IHDR(inode, raw_inode); |
| 2709 | |
| 2710 | /* |
| 2711 | * Check if enough free space is available in the inode to shift the |
| 2712 | * entries ahead by new_extra_isize. |
| 2713 | */ |
| 2714 | |
| 2715 | base = IFIRST(header); |
| 2716 | end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; |
| 2717 | min_offs = end - base; |
| 2718 | total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32); |
| 2719 | |
| 2720 | error = xattr_check_inode(inode, header, end); |
| 2721 | if (error) |
| 2722 | goto cleanup; |
| 2723 | |
| 2724 | ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino); |
| 2725 | if (ifree >= isize_diff) |
| 2726 | goto shift; |
| 2727 | |
| 2728 | /* |
| 2729 | * Enough free space isn't available in the inode, check if |
| 2730 | * EA block can hold new_extra_isize bytes. |
| 2731 | */ |
| 2732 | if (EXT4_I(inode)->i_file_acl) { |
| 2733 | struct buffer_head *bh; |
| 2734 | |
| 2735 | bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); |
| 2736 | if (IS_ERR(bh)) { |
| 2737 | error = PTR_ERR(bh); |
| 2738 | goto cleanup; |
| 2739 | } |
| 2740 | error = ext4_xattr_check_block(inode, bh); |
| 2741 | if (error) { |
| 2742 | brelse(bh); |
| 2743 | goto cleanup; |
| 2744 | } |
| 2745 | base = BHDR(bh); |
| 2746 | end = bh->b_data + bh->b_size; |
| 2747 | min_offs = end - base; |
| 2748 | bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base, |
| 2749 | NULL); |
| 2750 | brelse(bh); |
| 2751 | if (bfree + ifree < isize_diff) { |
| 2752 | if (!tried_min_extra_isize && s_min_extra_isize) { |
| 2753 | tried_min_extra_isize++; |
| 2754 | new_extra_isize = s_min_extra_isize; |
| 2755 | goto retry; |
| 2756 | } |
| 2757 | error = -ENOSPC; |
| 2758 | goto cleanup; |
| 2759 | } |
| 2760 | } else { |
| 2761 | bfree = inode->i_sb->s_blocksize; |
| 2762 | } |
| 2763 | |
| 2764 | error = ext4_xattr_make_inode_space(handle, inode, raw_inode, |
| 2765 | isize_diff, ifree, bfree, |
| 2766 | &total_ino); |
| 2767 | if (error) { |
| 2768 | if (error == -ENOSPC && !tried_min_extra_isize && |
| 2769 | s_min_extra_isize) { |
| 2770 | tried_min_extra_isize++; |
| 2771 | new_extra_isize = s_min_extra_isize; |
| 2772 | goto retry; |
| 2773 | } |
| 2774 | goto cleanup; |
| 2775 | } |
| 2776 | shift: |
| 2777 | /* Adjust the offsets and shift the remaining entries ahead */ |
| 2778 | ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize |
| 2779 | - new_extra_isize, (void *)raw_inode + |
| 2780 | EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize, |
| 2781 | (void *)header, total_ino); |
| 2782 | EXT4_I(inode)->i_extra_isize = new_extra_isize; |
| 2783 | |
| 2784 | cleanup: |
| 2785 | if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) { |
| 2786 | ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.", |
| 2787 | inode->i_ino); |
| 2788 | mnt_count = le16_to_cpu(sbi->s_es->s_mnt_count); |
| 2789 | } |
| 2790 | return error; |
| 2791 | } |
| 2792 | |
| 2793 | #define EIA_INCR 16 /* must be 2^n */ |
| 2794 | #define EIA_MASK (EIA_INCR - 1) |
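| | /* |
| | * The array thus grows in steps of 15, 31, 47, ... inode slots; |
| | * together with the count field each allocation stays close to a |
| | * power-of-two size, which suits the slab allocator. |
| | */ |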
| 2795 | |
| 2796 | /* Add the large xattr @inode into @ea_inode_array for deferred iput(). |
| 2797 | * If @ea_inode_array is new or full it will be grown and the old |
| 2798 | * contents copied over. |
| 2799 | */ |
| 2800 | static int |
| 2801 | ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array, |
| 2802 | struct inode *inode) |
| 2803 | { |
| 2804 | if (*ea_inode_array == NULL) { |
| 2805 | /* |
| 2806 | * Start with 15 inode slots, so that the count field plus the |
| | * slots fit into a power-of-two allocation; the kmalloc size |
| | * below is essentially an offsetof() into the flexible array. |
| 2808 | */ |
| 2809 | (*ea_inode_array) = |
| 2810 | kmalloc(offsetof(struct ext4_xattr_inode_array, |
| 2811 | inodes[EIA_MASK]), |
| 2812 | GFP_NOFS); |
| 2813 | if (*ea_inode_array == NULL) |
| 2814 | return -ENOMEM; |
| 2815 | (*ea_inode_array)->count = 0; |
| 2816 | } else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) { |
| 2817 | /* expand the array once all 15 + n * 16 slots are full */ |
| 2818 | struct ext4_xattr_inode_array *new_array = NULL; |
| 2819 | int count = (*ea_inode_array)->count; |
| 2820 | |
| 2821 | /* if new_array is NULL, this is essentially offsetof() */ |
| 2822 | new_array = kmalloc( |
| 2823 | offsetof(struct ext4_xattr_inode_array, |
| 2824 | inodes[count + EIA_INCR]), |
| 2825 | GFP_NOFS); |
| 2826 | if (new_array == NULL) |
| 2827 | return -ENOMEM; |
| 2828 | memcpy(new_array, *ea_inode_array, |
| 2829 | offsetof(struct ext4_xattr_inode_array, inodes[count])); |
| 2830 | kfree(*ea_inode_array); |
| 2831 | *ea_inode_array = new_array; |
| 2832 | } |
| 2833 | (*ea_inode_array)->inodes[(*ea_inode_array)->count++] = inode; |
| 2834 | return 0; |
| 2835 | } |
| 2836 | |
| 2837 | /* |
| 2838 | * ext4_xattr_delete_inode() |
| 2839 | * |
| 2840 | * Free the extended attribute resources associated with this inode. Traverse |
| 2841 | * all entries and decrement the reference count on any xattr inodes they |
| 2842 | * point to. This is called immediately before an inode is freed, so we have |
| 2843 | * exclusive access to the inode. Deleting an orphan inode also releases its |
| 2844 | * references on the xattr block and xattr inodes. |
| 2845 | */ |
| 2846 | int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode, |
| 2847 | struct ext4_xattr_inode_array **ea_inode_array, |
| 2848 | int extra_credits) |
| 2849 | { |
| 2850 | struct buffer_head *bh = NULL; |
| 2851 | struct ext4_xattr_ibody_header *header; |
| 2852 | struct ext4_iloc iloc = { .bh = NULL }; |
| 2853 | struct ext4_xattr_entry *entry; |
| 2854 | struct inode *ea_inode; |
| 2855 | int error; |
| 2856 | |
| 2857 | error = ext4_journal_ensure_credits(handle, extra_credits, |
| 2858 | ext4_free_metadata_revoke_credits(inode->i_sb, 1)); |
| 2859 | if (error < 0) { |
| 2860 | EXT4_ERROR_INODE(inode, "ensure credits (error %d)", error); |
| 2861 | goto cleanup; |
| 2862 | } |
| 2863 | |
| 2864 | if (ext4_has_feature_ea_inode(inode->i_sb) && |
| 2865 | ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { |
| 2866 | |
| 2867 | error = ext4_get_inode_loc(inode, &iloc); |
| 2868 | if (error) { |
| 2869 | EXT4_ERROR_INODE(inode, "inode loc (error %d)", error); |
| 2870 | goto cleanup; |
| 2871 | } |
| 2872 | |
| 2873 | error = ext4_journal_get_write_access(handle, iloc.bh); |
| 2874 | if (error) { |
| 2875 | EXT4_ERROR_INODE(inode, "write access (error %d)", |
| 2876 | error); |
| 2877 | goto cleanup; |
| 2878 | } |
| 2879 | |
| 2880 | header = IHDR(inode, ext4_raw_inode(&iloc)); |
| 2881 | if (header->h_magic == cpu_to_le32(EXT4_XATTR_MAGIC)) |
| 2882 | ext4_xattr_inode_dec_ref_all(handle, inode, iloc.bh, |
| 2883 | IFIRST(header), |
| 2884 | false /* block_csum */, |
| 2885 | ea_inode_array, |
| 2886 | extra_credits, |
| 2887 | false /* skip_quota */); |
| 2888 | } |
| 2889 | |
| 2890 | if (EXT4_I(inode)->i_file_acl) { |
| 2891 | bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); |
| 2892 | if (IS_ERR(bh)) { |
| 2893 | error = PTR_ERR(bh); |
| 2894 | if (error == -EIO) { |
| 2895 | EXT4_ERROR_INODE_ERR(inode, EIO, |
| 2896 | "block %llu read error", |
| 2897 | EXT4_I(inode)->i_file_acl); |
| 2898 | } |
| 2899 | bh = NULL; |
| 2900 | goto cleanup; |
| 2901 | } |
| 2902 | error = ext4_xattr_check_block(inode, bh); |
| 2903 | if (error) |
| 2904 | goto cleanup; |
| 2905 | |
| 2906 | if (ext4_has_feature_ea_inode(inode->i_sb)) { |
| 2907 | for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry); |
| 2908 | entry = EXT4_XATTR_NEXT(entry)) { |
| 2909 | if (!entry->e_value_inum) |
| 2910 | continue; |
| 2911 | error = ext4_xattr_inode_iget(inode, |
| 2912 | le32_to_cpu(entry->e_value_inum), |
| 2913 | le32_to_cpu(entry->e_hash), |
| 2914 | &ea_inode); |
| 2915 | if (error) |
| 2916 | continue; |
| 2917 | ext4_xattr_inode_free_quota(inode, ea_inode, |
| 2918 | le32_to_cpu(entry->e_value_size)); |
| 2919 | iput(ea_inode); |
| 2920 | } |
| 2921 | |
| 2922 | } |
| 2923 | |
| 2924 | ext4_xattr_release_block(handle, inode, bh, ea_inode_array, |
| 2925 | extra_credits); |
| 2926 | /* |
| 2927 | * Update the i_file_acl value in the same transaction that |
| 2928 | * releases the block. |
| 2929 | */ |
| 2930 | EXT4_I(inode)->i_file_acl = 0; |
| 2931 | error = ext4_mark_inode_dirty(handle, inode); |
| 2932 | if (error) { |
| 2933 | EXT4_ERROR_INODE(inode, "mark inode dirty (error %d)", |
| 2934 | error); |
| 2935 | goto cleanup; |
| 2936 | } |
| 2937 | ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR); |
| 2938 | } |
| 2939 | error = 0; |
| 2940 | cleanup: |
| 2941 | brelse(iloc.bh); |
| 2942 | brelse(bh); |
| 2943 | return error; |
| 2944 | } |
| 2945 | |
| 2946 | void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array) |
| 2947 | { |
| 2948 | int idx; |
| 2949 | |
| 2950 | if (ea_inode_array == NULL) |
| 2951 | return; |
| 2952 | |
| 2953 | for (idx = 0; idx < ea_inode_array->count; ++idx) |
| 2954 | iput(ea_inode_array->inodes[idx]); |
| 2955 | kfree(ea_inode_array); |
| 2956 | } |
| 2957 | |
| 2958 | /* |
| 2959 | * ext4_xattr_block_cache_insert() |
| 2960 | * |
| 2961 | * Create a new entry in the extended attribute block cache, and insert |
| 2962 | * it unless such an entry is already in the cache. |
| 2963 | * |
| 2964 | * Returns nothing; a failed insert (such as an identical entry |
| | * already being cached) is reported only via debug logging. |
| 2965 | */ |
| 2966 | static void |
| 2967 | ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache, |
| 2968 | struct buffer_head *bh) |
| 2969 | { |
| 2970 | struct ext4_xattr_header *header = BHDR(bh); |
| 2971 | __u32 hash = le32_to_cpu(header->h_hash); |
| 2972 | int reusable = le32_to_cpu(header->h_refcount) < |
| 2973 | EXT4_XATTR_REFCOUNT_MAX; |
| 2974 | int error; |
| 2975 | |
| 2976 | if (!ea_block_cache) |
| 2977 | return; |
| 2978 | error = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash, |
| 2979 | bh->b_blocknr, reusable); |
| 2980 | if (error) { |
| 2981 | if (error == -EBUSY) |
| 2982 | ea_bdebug(bh, "already in cache"); |
| 2983 | } else |
| 2984 | ea_bdebug(bh, "inserting [%x]", (int)hash); |
| 2985 | } |
| 2986 | |
| 2987 | /* |
| 2988 | * ext4_xattr_cmp() |
| 2989 | * |
| 2990 | * Compare two extended attribute blocks for equality. |
| 2991 | * |
| 2992 | * Returns 0 if the blocks are equal and 1 if they differ. |
| 2994 | */ |
| 2995 | static int |
| 2996 | ext4_xattr_cmp(struct ext4_xattr_header *header1, |
| 2997 | struct ext4_xattr_header *header2) |
| 2998 | { |
| 2999 | struct ext4_xattr_entry *entry1, *entry2; |
| 3000 | |
| 3001 | entry1 = ENTRY(header1+1); |
| 3002 | entry2 = ENTRY(header2+1); |
| 3003 | while (!IS_LAST_ENTRY(entry1)) { |
| 3004 | if (IS_LAST_ENTRY(entry2)) |
| 3005 | return 1; |
| 3006 | if (entry1->e_hash != entry2->e_hash || |
| 3007 | entry1->e_name_index != entry2->e_name_index || |
| 3008 | entry1->e_name_len != entry2->e_name_len || |
| 3009 | entry1->e_value_size != entry2->e_value_size || |
| 3010 | entry1->e_value_inum != entry2->e_value_inum || |
| 3011 | memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len)) |
| 3012 | return 1; |
| 3013 | if (!entry1->e_value_inum && |
| 3014 | memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs), |
| 3015 | (char *)header2 + le16_to_cpu(entry2->e_value_offs), |
| 3016 | le32_to_cpu(entry1->e_value_size))) |
| 3017 | return 1; |
| 3018 | |
| 3019 | entry1 = EXT4_XATTR_NEXT(entry1); |
| 3020 | entry2 = EXT4_XATTR_NEXT(entry2); |
| 3021 | } |
| 3022 | if (!IS_LAST_ENTRY(entry2)) |
| 3023 | return 1; |
| 3024 | return 0; |
| 3025 | } |
| 3026 | |
| 3027 | /* |
| 3028 | * ext4_xattr_block_cache_find() |
| 3029 | * |
| 3030 | * Find an identical extended attribute block. |
| 3031 | * |
| 3032 | * Returns a pointer to the block found, or NULL if such a block was |
| 3033 | * not found or an error occurred. |
| 3034 | */ |
| 3035 | static struct buffer_head * |
| 3036 | ext4_xattr_block_cache_find(struct inode *inode, |
| 3037 | struct ext4_xattr_header *header, |
| 3038 | struct mb_cache_entry **pce) |
| 3039 | { |
| 3040 | __u32 hash = le32_to_cpu(header->h_hash); |
| 3041 | struct mb_cache_entry *ce; |
| 3042 | struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); |
| 3043 | |
| 3044 | if (!ea_block_cache) |
| 3045 | return NULL; |
| 3046 | if (!header->h_hash) |
| 3047 | return NULL; /* never share */ |
| 3048 | ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); |
| 3049 | ce = mb_cache_entry_find_first(ea_block_cache, hash); |
| 3050 | while (ce) { |
| 3051 | struct buffer_head *bh; |
| 3052 | |
| 3053 | bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO); |
| 3054 | if (IS_ERR(bh)) { |
| 3055 | if (PTR_ERR(bh) == -ENOMEM) |
| 3056 | return NULL; |
| 3057 | bh = NULL; |
| 3058 | EXT4_ERROR_INODE(inode, "block %lu read error", |
| 3059 | (unsigned long)ce->e_value); |
| 3060 | } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) { |
| 3061 | *pce = ce; |
| 3062 | return bh; |
| 3063 | } |
| 3064 | brelse(bh); |
| 3065 | ce = mb_cache_entry_find_next(ea_block_cache, ce); |
| 3066 | } |
| 3067 | return NULL; |
| 3068 | } |
| 3069 | |
| 3070 | #define NAME_HASH_SHIFT 5 |
| 3071 | #define VALUE_HASH_SHIFT 16 |
| 3072 | |
| 3073 | /* |
| 3074 | * ext4_xattr_hash_entry() |
| 3075 | * |
| 3076 | * Compute the hash of an extended attribute. |
| 3077 | */ |
| 3078 | static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value, |
| 3079 | size_t value_count) |
| 3080 | { |
| 3081 | __u32 hash = 0; |
| 3082 | |
| 3083 | while (name_len--) { |
| 3084 | hash = (hash << NAME_HASH_SHIFT) ^ |
| 3085 | (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ |
| 3086 | *name++; |
| 3087 | } |
| 3088 | while (value_count--) { |
| 3089 | hash = (hash << VALUE_HASH_SHIFT) ^ |
| 3090 | (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^ |
| 3091 | le32_to_cpu(*value++); |
| 3092 | } |
| 3093 | return cpu_to_le32(hash); |
| 3094 | } |
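| | /* |
| | * Worked example (illustrative): hashing the one-byte name "x" from a |
| | * zero seed gives hash = (0 << 5) ^ (0 >> 27) ^ 'x' = 0x78; each |
| | * 32-bit word of the value is then folded in the same rotate-and-xor |
| | * fashion using the wider 16-bit VALUE_HASH_SHIFT. |
| | */ |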
| 3095 | |
| 3096 | #undef NAME_HASH_SHIFT |
| 3097 | #undef VALUE_HASH_SHIFT |
| 3098 | |
| 3099 | #define BLOCK_HASH_SHIFT 16 |
| 3100 | |
| 3101 | /* |
| 3102 | * ext4_xattr_rehash() |
| 3103 | * |
| 3104 | * Re-compute the extended attribute hash value after an entry has changed. |
| 3105 | */ |
| 3106 | static void ext4_xattr_rehash(struct ext4_xattr_header *header) |
| 3107 | { |
| 3108 | struct ext4_xattr_entry *here; |
| 3109 | __u32 hash = 0; |
| 3110 | |
| 3111 | here = ENTRY(header+1); |
| 3112 | while (!IS_LAST_ENTRY(here)) { |
| 3113 | if (!here->e_hash) { |
| 3114 | /* Block is not shared if an entry's hash value == 0 */ |
| 3115 | hash = 0; |
| 3116 | break; |
| 3117 | } |
| 3118 | hash = (hash << BLOCK_HASH_SHIFT) ^ |
| 3119 | (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^ |
| 3120 | le32_to_cpu(here->e_hash); |
| 3121 | here = EXT4_XATTR_NEXT(here); |
| 3122 | } |
| 3123 | header->h_hash = cpu_to_le32(hash); |
| 3124 | } |
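| | /* |
| | * For instance, a block holding a single entry with e_hash == H ends |
| | * up with h_hash = (0 << 16) ^ (0 >> 16) ^ H = H; any entry with a |
| | * zero e_hash forces h_hash to 0, marking the block unshareable. |
| | */ |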
| 3125 | |
| 3126 | #undef BLOCK_HASH_SHIFT |
| 3127 | |
| 3128 | #define HASH_BUCKET_BITS 10 |
| 3129 | |
| 3130 | struct mb_cache * |
| 3131 | ext4_xattr_create_cache(void) |
| 3132 | { |
| 3133 | return mb_cache_create(HASH_BUCKET_BITS); |
| 3134 | } |
| 3135 | |
| 3136 | void ext4_xattr_destroy_cache(struct mb_cache *cache) |
| 3137 | { |
| 3138 | if (cache) |
| 3139 | mb_cache_destroy(cache); |
| 3140 | } |
| 3141 | |