// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
 *  and Andreas Gruenbacher <agruen@suse.de>.
 */

/*
 * Extended attributes are stored directly in inodes (on file systems with
 * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
 * field contains the block number if an inode uses an additional block. All
 * attributes must fit in the inode and one additional block. Blocks that
 * contain the identical set of attributes may be shared among several inodes.
 * Identical blocks are detected by keeping a cache of blocks that have
 * recently been accessed.
 *
 * The attributes in inodes and on blocks have a different header; the entries
 * are stored in the same format:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The header is followed by multiple entry descriptors. In disk blocks, the
 * entry descriptors are kept sorted. In inodes, they are unsorted. The
 * attribute values are aligned to the end of the block in no specific order.
 *
 * Locking strategy
 * ----------------
 * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count can change. Multiple writers to the same block are synchronized
 * by the buffer lock.
 */
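
/*
 * For reference, the entry descriptor mentioned above is declared in
 * fs/ext4/xattr.h. The sketch below is illustrative only, not the
 * authoritative definition; consult the header for the exact layout:
 *
 *	struct ext4_xattr_entry {
 *		__u8	e_name_len;	// length of name
 *		__u8	e_name_index;	// attribute name index
 *		__le16	e_value_offs;	// offset of value within block
 *		__le32	e_value_inum;	// EA inode holding the value, or 0
 *		__le32	e_value_size;	// size of attribute value
 *		__le32	e_hash;		// hash of name and value
 *		char	e_name[];	// attribute name, not null-terminated
 *	};
 */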

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "acl.h"

#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, fmt, ...)					\
	printk(KERN_DEBUG "inode %s:%lu: " fmt "\n",			\
	       inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)					\
	printk(KERN_DEBUG "block %pg:%lu: " fmt "\n",			\
	       bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
#else
# define ea_idebug(inode, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif

static void ext4_xattr_block_cache_insert(struct mb_cache *,
					  struct buffer_head *);
static struct buffer_head *
ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
			    struct mb_cache_entry **);
static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
				    size_t value_count);
static void ext4_xattr_rehash(struct ext4_xattr_header *);

static const struct xattr_handler * const ext4_xattr_handler_map[] = {
	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT4_XATTR_INDEX_TRUSTED]	     = &ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_SECURITY
	[EXT4_XATTR_INDEX_SECURITY]	     = &ext4_xattr_security_handler,
#endif
	[EXT4_XATTR_INDEX_HURD]		     = &ext4_xattr_hurd_handler,
};

const struct xattr_handler *ext4_xattr_handlers[] = {
	&ext4_xattr_user_handler,
	&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT4_FS_SECURITY
	&ext4_xattr_security_handler,
#endif
	&ext4_xattr_hurd_handler,
	NULL
};

#define EA_BLOCK_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_ea_block_cache)

#define EA_INODE_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_ea_inode_cache)

static int
ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
			struct inode *inode);

#ifdef CONFIG_LOCKDEP
void ext4_xattr_inode_set_class(struct inode *ea_inode)
{
	lockdep_set_subclass(&ea_inode->i_rwsem, 1);
}
#endif

static __le32 ext4_xattr_block_csum(struct inode *inode,
				    sector_t block_nr,
				    struct ext4_xattr_header *hdr)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__le64 dsk_block_nr = cpu_to_le64(block_nr);
	__u32 dummy_csum = 0;
	int offset = offsetof(struct ext4_xattr_header, h_checksum);

	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
			   sizeof(dsk_block_nr));
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
	offset += sizeof(dummy_csum);
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
			   EXT4_BLOCK_SIZE(inode->i_sb) - offset);

	return cpu_to_le32(csum);
}

static int ext4_xattr_block_csum_verify(struct inode *inode,
					struct buffer_head *bh)
{
	struct ext4_xattr_header *hdr = BHDR(bh);
	int ret = 1;

	if (ext4_has_metadata_csum(inode->i_sb)) {
		lock_buffer(bh);
		ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
							bh->b_blocknr, hdr));
		unlock_buffer(bh);
	}
	return ret;
}

static void ext4_xattr_block_csum_set(struct inode *inode,
				      struct buffer_head *bh)
{
	if (ext4_has_metadata_csum(inode->i_sb))
		BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
						bh->b_blocknr, BHDR(bh));
}

static inline const struct xattr_handler *
ext4_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
		handler = ext4_xattr_handler_map[name_index];
	return handler;
}

static int
ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
			 void *value_start)
{
	struct ext4_xattr_entry *e = entry;

	/* Find the end of the names list */
	while (!IS_LAST_ENTRY(e)) {
		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
		if ((void *)next >= end)
			return -EFSCORRUPTED;
		if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
			return -EFSCORRUPTED;
		e = next;
	}

	/* Check the values */
	while (!IS_LAST_ENTRY(entry)) {
		u32 size = le32_to_cpu(entry->e_value_size);

		if (size > EXT4_XATTR_SIZE_MAX)
			return -EFSCORRUPTED;

		if (size != 0 && entry->e_value_inum == 0) {
			u16 offs = le16_to_cpu(entry->e_value_offs);
			void *value;

			/*
			 * The value cannot overlap the names, and the value
			 * with padding cannot extend beyond 'end'. Check both
			 * the padded and unpadded sizes, since the size may
			 * overflow to 0 when adding padding.
			 */
			if (offs > end - value_start)
				return -EFSCORRUPTED;
			value = value_start + offs;
			if (value < (void *)e + sizeof(u32) ||
			    size > end - value ||
			    EXT4_XATTR_SIZE(size) > end - value)
				return -EFSCORRUPTED;
		}
		entry = EXT4_XATTR_NEXT(entry);
	}

	return 0;
}

static inline int
__ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
			 const char *function, unsigned int line)
{
	int error = -EFSCORRUPTED;

	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1))
		goto errout;
	if (buffer_verified(bh))
		return 0;

	error = -EFSBADCRC;
	if (!ext4_xattr_block_csum_verify(inode, bh))
		goto errout;
	error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
					 bh->b_data);
errout:
	if (error)
		__ext4_error_inode(inode, function, line, 0, -error,
				   "corrupted xattr block %llu",
				   (unsigned long long) bh->b_blocknr);
	else
		set_buffer_verified(bh);
	return error;
}

#define ext4_xattr_check_block(inode, bh) \
	__ext4_xattr_check_block((inode), (bh), __func__, __LINE__)


static int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
		    void *end, const char *function, unsigned int line)
{
	int error = -EFSCORRUPTED;

	if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
	    (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
		goto errout;
	error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
errout:
	if (error)
		__ext4_error_inode(inode, function, line, 0, -error,
				   "corrupted in-inode xattr");
	return error;
}

#define xattr_check_inode(inode, header, end) \
	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)

static int
xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
		 void *end, int name_index, const char *name, int sorted)
{
	struct ext4_xattr_entry *entry, *next;
	size_t name_len;
	int cmp = 1;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	for (entry = *pentry; !IS_LAST_ENTRY(entry); entry = next) {
		next = EXT4_XATTR_NEXT(entry);
		if ((void *) next >= end) {
			EXT4_ERROR_INODE(inode, "corrupted xattr entries");
			return -EFSCORRUPTED;
		}
		cmp = name_index - entry->e_name_index;
		if (!cmp)
			cmp = name_len - entry->e_name_len;
		if (!cmp)
			cmp = memcmp(name, entry->e_name, name_len);
		if (cmp <= 0 && (sorted || cmp == 0))
			break;
	}
	*pentry = entry;
	return cmp ? -ENODATA : 0;
}

static u32
ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
{
	return ext4_chksum(sbi, sbi->s_csum_seed, buffer, size);
}

static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
	return ((u64)ea_inode->i_ctime.tv_sec << 32) |
		(u32) inode_peek_iversion_raw(ea_inode);
}

static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
{
	ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
	inode_set_iversion_raw(ea_inode, ref_count & 0xffffffff);
}

static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
{
	return (u32)ea_inode->i_atime.tv_sec;
}

static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
{
	ea_inode->i_atime.tv_sec = hash;
}

/*
 * Read the EA value from an inode.
 */
static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size)
{
	int blocksize = 1 << ea_inode->i_blkbits;
	int bh_count = (size + blocksize - 1) >> ea_inode->i_blkbits;
	int tail_size = (size % blocksize) ?: blocksize;
	struct buffer_head *bhs_inline[8];
	struct buffer_head **bhs = bhs_inline;
	int i, ret;

	if (bh_count > ARRAY_SIZE(bhs_inline)) {
		bhs = kmalloc_array(bh_count, sizeof(*bhs), GFP_NOFS);
		if (!bhs)
			return -ENOMEM;
	}

	ret = ext4_bread_batch(ea_inode, 0 /* block */, bh_count,
			       true /* wait */, bhs);
	if (ret)
		goto free_bhs;

	for (i = 0; i < bh_count; i++) {
		/* There shouldn't be any holes in ea_inode. */
		if (!bhs[i]) {
			ret = -EFSCORRUPTED;
			goto put_bhs;
		}
		memcpy((char *)buf + blocksize * i, bhs[i]->b_data,
		       i < bh_count - 1 ? blocksize : tail_size);
	}
	ret = 0;
put_bhs:
	for (i = 0; i < bh_count; i++)
		brelse(bhs[i]);
free_bhs:
	if (bhs != bhs_inline)
		kfree(bhs);
	return ret;
}

#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)

static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
				 u32 ea_inode_hash, struct inode **ea_inode)
{
	struct inode *inode;
	int err;

	inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ext4_error(parent->i_sb,
			   "error while reading EA inode %lu err=%d", ea_ino,
			   err);
		return err;
	}

	if (is_bad_inode(inode)) {
		ext4_error(parent->i_sb,
			   "error while reading EA inode %lu is_bad_inode",
			   ea_ino);
		err = -EIO;
		goto error;
	}

	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		ext4_error(parent->i_sb,
			   "EA inode %lu does not have EXT4_EA_INODE_FL flag",
			   ea_ino);
		err = -EINVAL;
		goto error;
	}

	ext4_xattr_inode_set_class(inode);

	/*
	 * Check whether this is an old Lustre-style xattr inode. Lustre
	 * implementation does not have hash validation, rather it has a
	 * backpointer from ea_inode to the parent inode.
	 */
	if (ea_inode_hash != ext4_xattr_inode_get_hash(inode) &&
	    EXT4_XATTR_INODE_GET_PARENT(inode) == parent->i_ino &&
	    inode->i_generation == parent->i_generation) {
		ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
		ext4_xattr_inode_set_ref(inode, 1);
	} else {
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
	}

	*ea_inode = inode;
	return 0;
error:
	iput(inode);
	return err;
}

static int
ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
			       struct ext4_xattr_entry *entry, void *buffer,
			       size_t size)
{
	u32 hash;

	/* Verify stored hash matches calculated hash. */
	hash = ext4_xattr_inode_hash(EXT4_SB(ea_inode->i_sb), buffer, size);
	if (hash != ext4_xattr_inode_get_hash(ea_inode))
		return -EFSCORRUPTED;

	if (entry) {
		__le32 e_hash, tmp_data;

		/* Verify entry hash. */
		tmp_data = cpu_to_le32(hash);
		e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
					       &tmp_data, 1);
		if (e_hash != entry->e_hash)
			return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * Read xattr value from the EA inode.
 */
static int
ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry,
		     void *buffer, size_t size)
{
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
	struct inode *ea_inode;
	int err;

	err = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum),
				    le32_to_cpu(entry->e_hash), &ea_inode);
	if (err) {
		ea_inode = NULL;
		goto out;
	}

	if (i_size_read(ea_inode) != size) {
		ext4_warning_inode(ea_inode,
				   "ea_inode file size=%llu entry size=%zu",
				   i_size_read(ea_inode), size);
		err = -EFSCORRUPTED;
		goto out;
	}

	err = ext4_xattr_inode_read(ea_inode, buffer, size);
	if (err)
		goto out;

	if (!ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE)) {
		err = ext4_xattr_inode_verify_hashes(ea_inode, entry, buffer,
						     size);
		if (err) {
			ext4_warning_inode(ea_inode,
					   "EA inode hash validation failed");
			goto out;
		}

		if (ea_inode_cache)
			mb_cache_entry_create(ea_inode_cache, GFP_NOFS,
					ext4_xattr_inode_get_hash(ea_inode),
					ea_inode->i_ino, true /* reusable */);
	}
out:
	iput(ea_inode);
	return err;
}

static int
ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_entry *entry;
	size_t size;
	void *end;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (!EXT4_I(inode)->i_file_acl)
		return -ENODATA;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		  atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	error = ext4_xattr_check_block(inode, bh);
	if (error)
		goto cleanup;
	ext4_xattr_block_cache_insert(ea_block_cache, bh);
	entry = BFIRST(bh);
	end = bh->b_data + bh->b_size;
	error = xattr_find_entry(inode, &entry, end, name_index, name, 1);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	error = -ERANGE;
	if (unlikely(size > EXT4_XATTR_SIZE_MAX))
		goto cleanup;
	if (buffer) {
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
			error = ext4_xattr_inode_get(inode, entry, buffer,
						     size);
			if (error)
				goto cleanup;
		} else {
			u16 offset = le16_to_cpu(entry->e_value_offs);
			void *p = bh->b_data + offset;

			if (unlikely(p + size > end))
				goto cleanup;
			memcpy(buffer, p, size);
		}
	}
	error = size;

cleanup:
	brelse(bh);
	return error;
}

int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	size_t size;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return -ENODATA;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;
	entry = IFIRST(header);
	error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	error = -ERANGE;
	if (unlikely(size > EXT4_XATTR_SIZE_MAX))
		goto cleanup;
	if (buffer) {
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
			error = ext4_xattr_inode_get(inode, entry, buffer,
						     size);
			if (error)
				goto cleanup;
		} else {
			u16 offset = le16_to_cpu(entry->e_value_offs);
			void *p = (void *)IFIRST(header) + offset;

			if (unlikely(p + size > end))
				goto cleanup;
			memcpy(buffer, p, size);
		}
	}
	error = size;

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	int error;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (strlen(name) > 255)
		return -ERANGE;

	down_read(&EXT4_I(inode)->xattr_sem);
	error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
				     buffer_size);
	if (error == -ENODATA)
		error = ext4_xattr_block_get(inode, name_index, name, buffer,
					     buffer_size);
	up_read(&EXT4_I(inode)->xattr_sem);
	return error;
}
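
/*
 * Userspace view (illustrative sketch, not part of this translation unit):
 * the two-call pattern ext4_xattr_get() serves -- a NULL buffer to query the
 * size, then a second call to fetch the value -- is what getxattr(2) exposes.
 * The path and the "user.comment" name are placeholders:
 *
 *	#include <stdlib.h>
 *	#include <sys/xattr.h>
 *
 *	ssize_t len = getxattr(path, "user.comment", NULL, 0);
 *	if (len >= 0) {
 *		char *buf = malloc(len);
 *		if (buf && getxattr(path, "user.comment", buf, len) < 0)
 *			perror("getxattr");
 *		free(buf);
 *	}
 *
 * The value may change between the two calls, so callers must be prepared
 * for errno == ERANGE and retry with a larger buffer.
 */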

static int
ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
			char *buffer, size_t buffer_size)
{
	size_t rest = buffer_size;

	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext4_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest)
					return -ERANGE;
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	return buffer_size - rest; /* total size */
}

static int
ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	if (!EXT4_I(inode)->i_file_acl)
		return 0;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		  atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	error = ext4_xattr_check_block(inode, bh);
	if (error)
		goto cleanup;
	ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
					buffer_size);
cleanup:
	brelse(bh);
	return error;
}

static int
ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return 0;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;
	error = ext4_xattr_list_entries(dentry, IFIRST(header),
					buffer, buffer_size);

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_rwsem: don't care
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	int ret, ret2;

	down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
	ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	if (buffer) {
		buffer += ret;
		buffer_size -= ret;
	}
	ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	ret += ret2;
errout:
	up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
	return ret;
}
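
/*
 * Userspace view (illustrative sketch): listxattr(2) returns the same
 * concatenated, NUL-separated name list that ext4_listxattr() builds, with
 * the same size-query convention:
 *
 *	ssize_t len = listxattr(path, NULL, 0);	// required buffer size
 *	char *names = malloc(len);		// then fetch the list:
 *	len = listxattr(path, names, len);	// "user.a\0user.b\0..."
 */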

/*
 * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext4_xattr_update_super_block(handle_t *handle,
					  struct super_block *sb)
{
	if (ext4_has_feature_xattr(sb))
		return;

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
		ext4_set_feature_xattr(sb);
		ext4_handle_dirty_super(handle, sb);
	}
}

int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
{
	struct ext4_iloc iloc = { .bh = NULL };
	struct buffer_head *bh = NULL;
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	qsize_t ea_inode_refs = 0;
	void *end;
	int ret;

	lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);

	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		ret = ext4_get_inode_loc(inode, &iloc);
		if (ret)
			goto out;
		raw_inode = ext4_raw_inode(&iloc);
		header = IHDR(inode, raw_inode);
		end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
		ret = xattr_check_inode(inode, header, end);
		if (ret)
			goto out;

		for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
		     entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				ea_inode_refs++;
	}

	if (EXT4_I(inode)->i_file_acl) {
		bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			bh = NULL;
			goto out;
		}

		ret = ext4_xattr_check_block(inode, bh);
		if (ret)
			goto out;

		for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
		     entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				ea_inode_refs++;
	}
	*usage = ea_inode_refs + 1;
	ret = 0;
out:
	brelse(iloc.bh);
	brelse(bh);
	return ret;
}

static inline size_t round_up_cluster(struct inode *inode, size_t length)
{
	struct super_block *sb = inode->i_sb;
	size_t cluster_size = 1 << (EXT4_SB(sb)->s_cluster_bits +
				    inode->i_blkbits);
	size_t mask = ~(cluster_size - 1);

	return (length + cluster_size - 1) & mask;
}

static int ext4_xattr_inode_alloc_quota(struct inode *inode, size_t len)
{
	int err;

	err = dquot_alloc_inode(inode);
	if (err)
		return err;
	err = dquot_alloc_space_nodirty(inode, round_up_cluster(inode, len));
	if (err)
		dquot_free_inode(inode);
	return err;
}

static void ext4_xattr_inode_free_quota(struct inode *parent,
					struct inode *ea_inode,
					size_t len)
{
	if (ea_inode &&
	    ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE))
		return;
	dquot_free_space_nodirty(parent, round_up_cluster(parent, len));
	dquot_free_inode(parent);
}

int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
			     struct buffer_head *block_bh, size_t value_len,
			     bool is_create)
{
	int credits;
	int blocks;

	/*
	 * 1) Owner inode update
	 * 2) Ref count update on old xattr block
	 * 3) new xattr block
	 * 4) block bitmap update for new xattr block
	 * 5) group descriptor for new xattr block
	 * 6) block bitmap update for old xattr block
	 * 7) group descriptor for old block
	 *
	 * 6 & 7 can happen if we have two racing threads T_a and T_b
	 * which are each trying to set an xattr on inodes I_a and I_b
	 * which were both initially sharing an xattr block.
	 */
	credits = 7;

	/* Quota updates. */
	credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(sb);

	/*
	 * In case of inline data, we may push out the data to a block,
	 * so we need to reserve credits for this eventuality
	 */
	if (inode && ext4_has_inline_data(inode))
		credits += ext4_writepage_trans_blocks(inode) + 1;

	/* We are done if ea_inode feature is not enabled. */
	if (!ext4_has_feature_ea_inode(sb))
		return credits;

	/* New ea_inode, inode map, block bitmap, group descriptor. */
	credits += 4;

	/* Data blocks. */
	blocks = (value_len + sb->s_blocksize - 1) >> sb->s_blocksize_bits;

	/* Indirection block or one level of extent tree. */
	blocks += 1;

	/* Block bitmap and group descriptor updates for each block. */
	credits += blocks * 2;

	/* Blocks themselves. */
	credits += blocks;

	if (!is_create) {
		/* Dereference ea_inode holding old xattr value.
		 * Old ea_inode, inode map, block bitmap, group descriptor.
		 */
		credits += 4;

		/* Data blocks for old ea_inode. */
		blocks = XATTR_SIZE_MAX >> sb->s_blocksize_bits;

		/* Indirection block or one level of extent tree for old
		 * ea_inode.
		 */
		blocks += 1;

		/* Block bitmap and group descriptor updates for each block. */
		credits += blocks * 2;
	}

	/* We may need to clone the existing xattr block in which case we need
	 * to increment ref counts for existing ea_inodes referenced by it.
	 */
	if (block_bh) {
		struct ext4_xattr_entry *entry = BFIRST(block_bh);

		for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				/* Ref count update on ea_inode. */
				credits += 1;
	}
	return credits;
}
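
/*
 * Worked example for the credit arithmetic above (illustrative; assumes a
 * 4 KiB block size, the ea_inode feature enabled, is_create == true, no
 * inline data and block_bh == NULL): storing a 16 KiB value gives
 * blocks = DIV_ROUND_UP(16384, 4096) + 1 = 5, so
 *
 *	credits = 7					base updates
 *		+ EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)	quota updates
 *		+ 4					new ea_inode metadata
 *		+ 5 * 2					bitmap/descriptor per block
 *		+ 5					the data blocks themselves
 */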

static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
				       int ref_change)
{
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
	struct ext4_iloc iloc;
	s64 ref_count;
	u32 hash;
	int ret;

	inode_lock(ea_inode);

	ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
	if (ret)
		goto out;

	ref_count = ext4_xattr_inode_get_ref(ea_inode);
	ref_count += ref_change;
	ext4_xattr_inode_set_ref(ea_inode, ref_count);

	if (ref_change > 0) {
		WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
			  ea_inode->i_ino, ref_count);

		if (ref_count == 1) {
			WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
				  ea_inode->i_ino, ea_inode->i_nlink);

			set_nlink(ea_inode, 1);
			ext4_orphan_del(handle, ea_inode);

			if (ea_inode_cache) {
				hash = ext4_xattr_inode_get_hash(ea_inode);
				mb_cache_entry_create(ea_inode_cache,
						      GFP_NOFS, hash,
						      ea_inode->i_ino,
						      true /* reusable */);
			}
		}
	} else {
		WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
			  ea_inode->i_ino, ref_count);

		if (ref_count == 0) {
			WARN_ONCE(ea_inode->i_nlink != 1,
				  "EA inode %lu i_nlink=%u",
				  ea_inode->i_ino, ea_inode->i_nlink);

			clear_nlink(ea_inode);
			ext4_orphan_add(handle, ea_inode);

			if (ea_inode_cache) {
				hash = ext4_xattr_inode_get_hash(ea_inode);
				mb_cache_entry_delete(ea_inode_cache, hash,
						      ea_inode->i_ino);
			}
		}
	}

	ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
	if (ret)
		ext4_warning_inode(ea_inode,
				   "ext4_mark_iloc_dirty() failed ret=%d", ret);
out:
	inode_unlock(ea_inode);
	return ret;
}

static int ext4_xattr_inode_inc_ref(handle_t *handle, struct inode *ea_inode)
{
	return ext4_xattr_inode_update_ref(handle, ea_inode, 1);
}

static int ext4_xattr_inode_dec_ref(handle_t *handle, struct inode *ea_inode)
{
	return ext4_xattr_inode_update_ref(handle, ea_inode, -1);
}

static int ext4_xattr_inode_inc_ref_all(handle_t *handle, struct inode *parent,
					struct ext4_xattr_entry *first)
{
	struct inode *ea_inode;
	struct ext4_xattr_entry *entry;
	struct ext4_xattr_entry *failed_entry;
	unsigned int ea_ino;
	int err, saved_err;

	for (entry = first; !IS_LAST_ENTRY(entry);
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err)
			goto cleanup;
		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode, "inc ref error %d", err);
			iput(ea_inode);
			goto cleanup;
		}
		iput(ea_inode);
	}
	return 0;

cleanup:
	saved_err = err;
	failed_entry = entry;

	for (entry = first; entry != failed_entry;
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err) {
			ext4_warning(parent->i_sb,
				     "cleanup ea_ino %u iget error %d", ea_ino,
				     err);
			continue;
		}
		err = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (err)
			ext4_warning_inode(ea_inode, "cleanup dec ref error %d",
					   err);
		iput(ea_inode);
	}
	return saved_err;
}

static int ext4_xattr_restart_fn(handle_t *handle, struct inode *inode,
			struct buffer_head *bh, bool block_csum, bool dirty)
{
	int error;

	if (bh && dirty) {
		if (block_csum)
			ext4_xattr_block_csum_set(inode, bh);
		error = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (error) {
			ext4_warning(inode->i_sb, "Handle metadata (error %d)",
				     error);
			return error;
		}
	}
	return 0;
}

static void
ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
			     struct buffer_head *bh,
			     struct ext4_xattr_entry *first, bool block_csum,
			     struct ext4_xattr_inode_array **ea_inode_array,
			     int extra_credits, bool skip_quota)
{
	struct inode *ea_inode;
	struct ext4_xattr_entry *entry;
	bool dirty = false;
	unsigned int ea_ino;
	int err;
	int credits;

	/* One credit for dec ref on ea_inode, one for orphan list addition, */
	credits = 2 + extra_credits;

	for (entry = first; !IS_LAST_ENTRY(entry);
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err)
			continue;

		err = ext4_expand_inode_array(ea_inode_array, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode,
					   "Expand inode array err=%d", err);
			iput(ea_inode);
			continue;
		}

		err = ext4_journal_ensure_credits_fn(handle, credits, credits,
			ext4_free_metadata_revoke_credits(parent->i_sb, 1),
			ext4_xattr_restart_fn(handle, parent, bh, block_csum,
					      dirty));
		if (err < 0) {
			ext4_warning_inode(ea_inode, "Ensure credits err=%d",
					   err);
			continue;
		}
		if (err > 0) {
			err = ext4_journal_get_write_access(handle, bh);
			if (err) {
				ext4_warning_inode(ea_inode,
						   "Re-get write access err=%d",
						   err);
				continue;
			}
		}

		err = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode, "ea_inode dec ref err=%d",
					   err);
			continue;
		}

		if (!skip_quota)
			ext4_xattr_inode_free_quota(parent, ea_inode,
					      le32_to_cpu(entry->e_value_size));

		/*
		 * Forget about ea_inode within the same transaction that
		 * decrements the ref count. This avoids duplicate decrements in
		 * case the rest of the work spills over to subsequent
		 * transactions.
		 */
		entry->e_value_inum = 0;
		entry->e_value_size = 0;

		dirty = true;
	}

	if (dirty) {
		/*
		 * Note that we are deliberately skipping csum calculation for
		 * the final update because we do not expect any journal
		 * restarts until xattr block is freed.
		 */

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (err)
			ext4_warning_inode(parent,
					   "handle dirty metadata err=%d", err);
	}
}

/*
 * Release the xattr block BH: If the reference count is > 1, decrement it;
 * otherwise free the block.
 */
static void
ext4_xattr_release_block(handle_t *handle, struct inode *inode,
			 struct buffer_head *bh,
			 struct ext4_xattr_inode_array **ea_inode_array,
			 int extra_credits)
{
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
	u32 hash, ref;
	int error = 0;

	BUFFER_TRACE(bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, bh);
	if (error)
		goto out;

	lock_buffer(bh);
	hash = le32_to_cpu(BHDR(bh)->h_hash);
	ref = le32_to_cpu(BHDR(bh)->h_refcount);
	if (ref == 1) {
		ea_bdebug(bh, "refcount now=0; freeing");
		/*
		 * This must happen under buffer lock for
		 * ext4_xattr_block_set() to reliably detect freed block
		 */
		if (ea_block_cache)
			mb_cache_entry_delete(ea_block_cache, hash,
					      bh->b_blocknr);
		get_bh(bh);
		unlock_buffer(bh);

		if (ext4_has_feature_ea_inode(inode->i_sb))
			ext4_xattr_inode_dec_ref_all(handle, inode, bh,
						     BFIRST(bh),
						     true /* block_csum */,
						     ea_inode_array,
						     extra_credits,
						     true /* skip_quota */);
		ext4_free_blocks(handle, inode, bh, 0, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	} else {
		ref--;
		BHDR(bh)->h_refcount = cpu_to_le32(ref);
		if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
			struct mb_cache_entry *ce;

			if (ea_block_cache) {
				ce = mb_cache_entry_get(ea_block_cache, hash,
							bh->b_blocknr);
				if (ce) {
					ce->e_reusable = 1;
					mb_cache_entry_put(ea_block_cache, ce);
				}
			}
		}

		ext4_xattr_block_csum_set(inode, bh);
		/*
		 * Beware of this ugliness: Releasing of xattr block references
		 * from different inodes can race and so we have to protect
		 * from a race where someone else frees the block (and releases
		 * its journal_head) before we are done dirtying the buffer. In
		 * nojournal mode this race is harmless and we actually cannot
		 * call ext4_handle_dirty_metadata() with locked buffer as
		 * that function can call sync_dirty_buffer() so for that case
		 * we handle the dirtying after unlocking the buffer.
		 */
		if (ext4_handle_valid(handle))
			error = ext4_handle_dirty_metadata(handle, inode, bh);
		unlock_buffer(bh);
		if (!ext4_handle_valid(handle))
			error = ext4_handle_dirty_metadata(handle, inode, bh);
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
		ea_bdebug(bh, "refcount now=%d; releasing",
			  le32_to_cpu(BHDR(bh)->h_refcount));
	}
out:
	ext4_std_error(inode->i_sb, error);
	return;
}

/*
 * Find the available free space for EAs. This also returns the total number of
 * bytes used by EA entries.
 */
static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
				    size_t *min_offs, void *base, int *total)
{
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_inum && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < *min_offs)
				*min_offs = offs;
		}
		if (total)
			*total += EXT4_XATTR_LEN(last->e_name_len);
	}
	return (*min_offs - ((void *)last - base) - sizeof(__u32));
}

/*
 * Write the value of the EA in an inode.
 */
static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
				  const void *buf, int bufsize)
{
	struct buffer_head *bh = NULL;
	unsigned long block = 0;
	int blocksize = ea_inode->i_sb->s_blocksize;
	int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
	int csize, wsize = 0;
	int ret = 0, ret2 = 0;
	int retries = 0;

retry:
	while (ret >= 0 && ret < max_blocks) {
		struct ext4_map_blocks map;
		map.m_lblk = block += ret;
		map.m_len = max_blocks -= ret;

		ret = ext4_map_blocks(handle, ea_inode, &map,
				      EXT4_GET_BLOCKS_CREATE);
		if (ret <= 0) {
			ext4_mark_inode_dirty(handle, ea_inode);
			if (ret == -ENOSPC &&
			    ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
				ret = 0;
				goto retry;
			}
			break;
		}
	}

	if (ret < 0)
		return ret;

	block = 0;
	while (wsize < bufsize) {
		brelse(bh);
		csize = (bufsize - wsize) > blocksize ? blocksize :
							bufsize - wsize;
		bh = ext4_getblk(handle, ea_inode, block, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh) {
			WARN_ON_ONCE(1);
			EXT4_ERROR_INODE(ea_inode,
					 "ext4_getblk() return bh = NULL");
			return -EFSCORRUPTED;
		}
		ret = ext4_journal_get_write_access(handle, bh);
		if (ret)
			goto out;

		memcpy(bh->b_data, buf, csize);
		set_buffer_uptodate(bh);
		ext4_handle_dirty_metadata(handle, ea_inode, bh);

		buf += csize;
		wsize += csize;
		block += 1;
	}

	inode_lock(ea_inode);
	i_size_write(ea_inode, wsize);
	ext4_update_i_disksize(ea_inode, wsize);
	inode_unlock(ea_inode);

	ret2 = ext4_mark_inode_dirty(handle, ea_inode);
	if (unlikely(ret2 && !ret))
		ret = ret2;

out:
	brelse(bh);

	return ret;
}

/*
 * Create an inode to store the value of a large EA.
 */
static struct inode *ext4_xattr_inode_create(handle_t *handle,
					     struct inode *inode, u32 hash)
{
	struct inode *ea_inode = NULL;
	uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
	int err;

	/*
	 * Let the next inode be the goal, so we try to allocate the EA inode
	 * in the same group, or a nearby one.
	 */
	ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				  S_IFREG | 0600, NULL, inode->i_ino + 1, owner,
				  EXT4_EA_INODE_FL);
	if (!IS_ERR(ea_inode)) {
		ea_inode->i_op = &ext4_file_inode_operations;
		ea_inode->i_fop = &ext4_file_operations;
		ext4_set_aops(ea_inode);
		ext4_xattr_inode_set_class(ea_inode);
		unlock_new_inode(ea_inode);
		ext4_xattr_inode_set_ref(ea_inode, 1);
		ext4_xattr_inode_set_hash(ea_inode, hash);
		err = ext4_mark_inode_dirty(handle, ea_inode);
		if (!err)
			err = ext4_inode_attach_jinode(ea_inode);
		if (err) {
			iput(ea_inode);
			return ERR_PTR(err);
		}

		/*
		 * Xattr inodes are shared therefore quota charging is performed
		 * at a higher level.
		 */
		dquot_free_inode(ea_inode);
		dquot_drop(ea_inode);
		inode_lock(ea_inode);
		ea_inode->i_flags |= S_NOQUOTA;
		inode_unlock(ea_inode);
	}

	return ea_inode;
}

static struct inode *
ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
			    size_t value_len, u32 hash)
{
	struct inode *ea_inode;
	struct mb_cache_entry *ce;
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
	void *ea_data;

	if (!ea_inode_cache)
		return NULL;

	ce = mb_cache_entry_find_first(ea_inode_cache, hash);
	if (!ce)
		return NULL;

	WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
		     !(current->flags & PF_MEMALLOC_NOFS));

	ea_data = kvmalloc(value_len, GFP_KERNEL);
	if (!ea_data) {
		mb_cache_entry_put(ea_inode_cache, ce);
		return NULL;
	}

	while (ce) {
		ea_inode = ext4_iget(inode->i_sb, ce->e_value,
				     EXT4_IGET_NORMAL);
		if (!IS_ERR(ea_inode) &&
		    !is_bad_inode(ea_inode) &&
		    (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
		    i_size_read(ea_inode) == value_len &&
		    !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
		    !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
						    value_len) &&
		    !memcmp(value, ea_data, value_len)) {
			mb_cache_entry_touch(ea_inode_cache, ce);
			mb_cache_entry_put(ea_inode_cache, ce);
			kvfree(ea_data);
			return ea_inode;
		}

		if (!IS_ERR(ea_inode))
			iput(ea_inode);
		ce = mb_cache_entry_find_next(ea_inode_cache, ce);
	}
	kvfree(ea_data);
	return NULL;
}

/*
 * Add value of the EA in an inode.
 */
static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
					  const void *value, size_t value_len,
					  struct inode **ret_inode)
{
	struct inode *ea_inode;
	u32 hash;
	int err;

	hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
	ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
	if (ea_inode) {
		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
		if (err) {
			iput(ea_inode);
			return err;
		}

		*ret_inode = ea_inode;
		return 0;
	}

	/* Create an inode for the EA value */
	ea_inode = ext4_xattr_inode_create(handle, inode, hash);
	if (IS_ERR(ea_inode))
		return PTR_ERR(ea_inode);

	err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
	if (err) {
		ext4_xattr_inode_dec_ref(handle, ea_inode);
		iput(ea_inode);
		return err;
	}

	if (EA_INODE_CACHE(inode))
		mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
				      ea_inode->i_ino, true /* reusable */);

	*ret_inode = ea_inode;
	return 0;
}

/*
 * Reserve min(block_size/8, 1024) bytes for xattr entries/names if ea_inode
 * feature is enabled.
 */
#define EXT4_XATTR_BLOCK_RESERVE(inode)	min(i_blocksize(inode)/8, 1024U)

static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
				struct ext4_xattr_search *s,
				handle_t *handle, struct inode *inode,
				bool is_block)
{
	struct ext4_xattr_entry *last, *next;
	struct ext4_xattr_entry *here = s->here;
	size_t min_offs = s->end - s->base, name_len = strlen(i->name);
	int in_inode = i->in_inode;
	struct inode *old_ea_inode = NULL;
	struct inode *new_ea_inode = NULL;
	size_t old_size, new_size;
	int ret;

	/* Space used by old and new values. */
	old_size = (!s->not_found && !here->e_value_inum) ?
			EXT4_XATTR_SIZE(le32_to_cpu(here->e_value_size)) : 0;
	new_size = (i->value && !in_inode) ? EXT4_XATTR_SIZE(i->value_len) : 0;

	/*
	 * Optimization for the simple case when old and new values have the
	 * same padded sizes. Not applicable if external inodes are involved.
	 */
	if (new_size && new_size == old_size) {
		size_t offs = le16_to_cpu(here->e_value_offs);
		void *val = s->base + offs;

		here->e_value_size = cpu_to_le32(i->value_len);
		if (i->value == EXT4_ZERO_XATTR_VALUE) {
			memset(val, 0, new_size);
		} else {
			memcpy(val, i->value, i->value_len);
			/* Clear padding bytes. */
			memset(val + i->value_len, 0, new_size - i->value_len);
		}
		goto update_hash;
	}

	/* Compute min_offs and last. */
	last = s->first;
	for (; !IS_LAST_ENTRY(last); last = next) {
		next = EXT4_XATTR_NEXT(last);
		if ((void *)next >= s->end) {
			EXT4_ERROR_INODE(inode, "corrupted xattr entries");
			ret = -EFSCORRUPTED;
			goto out;
		}
		if (!last->e_value_inum && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < min_offs)
				min_offs = offs;
		}
	}

	/* Check whether we have enough space. */
	if (i->value) {
		size_t free;

		free = min_offs - ((void *)last - s->base) - sizeof(__u32);
		if (!s->not_found)
			free += EXT4_XATTR_LEN(name_len) + old_size;

		if (free < EXT4_XATTR_LEN(name_len) + new_size) {
			ret = -ENOSPC;
			goto out;
		}

		/*
		 * If storing the value in an external inode is an option,
		 * reserve space for xattr entries/names in the external
		 * attribute block so that a long value does not occupy the
		 * whole space and prevent further entries from being added.
		 */
		if (ext4_has_feature_ea_inode(inode->i_sb) &&
		    new_size && is_block &&
		    (min_offs + old_size - new_size) <
					EXT4_XATTR_BLOCK_RESERVE(inode)) {
			ret = -ENOSPC;
			goto out;
		}
	}

	/*
	 * Getting access to old and new ea inodes is subject to failures.
	 * Finish that work before doing any modifications to the xattr data.
	 */
	if (!s->not_found && here->e_value_inum) {
		ret = ext4_xattr_inode_iget(inode,
					    le32_to_cpu(here->e_value_inum),
					    le32_to_cpu(here->e_hash),
					    &old_ea_inode);
		if (ret) {
			old_ea_inode = NULL;
			goto out;
		}
	}
	if (i->value && in_inode) {
		WARN_ON_ONCE(!i->value_len);

		ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
		if (ret)
			goto out;

		ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
						     i->value_len,
						     &new_ea_inode);
		if (ret) {
			new_ea_inode = NULL;
			ext4_xattr_inode_free_quota(inode, NULL, i->value_len);
			goto out;
		}
	}

	if (old_ea_inode) {
		/* We are ready to release ref count on the old_ea_inode. */
		ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
		if (ret) {
			/* Release newly required ref count on new_ea_inode. */
			if (new_ea_inode) {
				int err;

				err = ext4_xattr_inode_dec_ref(handle,
							       new_ea_inode);
				if (err)
					ext4_warning_inode(new_ea_inode,
						  "dec ref new_ea_inode err=%d",
						  err);
				ext4_xattr_inode_free_quota(inode, new_ea_inode,
							    i->value_len);
			}
			goto out;
		}

		ext4_xattr_inode_free_quota(inode, old_ea_inode,
					    le32_to_cpu(here->e_value_size));
	}

	/* No failures allowed past this point. */

	if (!s->not_found && here->e_value_size && !here->e_value_inum) {
		/* Remove the old value. */
		void *first_val = s->base + min_offs;
		size_t offs = le16_to_cpu(here->e_value_offs);
		void *val = s->base + offs;

		memmove(first_val + old_size, first_val, val - first_val);
		memset(first_val, 0, old_size);
		min_offs += old_size;

		/* Adjust all value offsets. */
		last = s->first;
		while (!IS_LAST_ENTRY(last)) {
			size_t o = le16_to_cpu(last->e_value_offs);

			if (!last->e_value_inum &&
			    last->e_value_size && o < offs)
				last->e_value_offs = cpu_to_le16(o + old_size);
			last = EXT4_XATTR_NEXT(last);
		}
	}

	if (!i->value) {
		/* Remove old name. */
		size_t size = EXT4_XATTR_LEN(name_len);

		last = ENTRY((void *)last - size);
		memmove(here, (void *)here + size,
			(void *)last - (void *)here + sizeof(__u32));
		memset(last, 0, size);
	} else if (s->not_found) {
		/* Insert new name. */
		size_t size = EXT4_XATTR_LEN(name_len);
		size_t rest = (void *)last - (void *)here + sizeof(__u32);

		memmove((void *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = i->name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, i->name, name_len);
	} else {
		/* This is an update, reset value info. */
		here->e_value_inum = 0;
		here->e_value_offs = 0;
		here->e_value_size = 0;
	}

	if (i->value) {
		/* Insert new value. */
		if (in_inode) {
			here->e_value_inum = cpu_to_le32(new_ea_inode->i_ino);
		} else if (i->value_len) {
			void *val = s->base + min_offs - new_size;

			here->e_value_offs = cpu_to_le16(min_offs - new_size);
			if (i->value == EXT4_ZERO_XATTR_VALUE) {
				memset(val, 0, new_size);
			} else {
				memcpy(val, i->value, i->value_len);
				/* Clear padding bytes. */
				memset(val + i->value_len, 0,
				       new_size - i->value_len);
			}
		}
		here->e_value_size = cpu_to_le32(i->value_len);
	}

update_hash:
	if (i->value) {
		__le32 hash = 0;

		/* Entry hash calculation. */
		if (in_inode) {
			__le32 crc32c_hash;

			/*
			 * Feed the crc32c hash instead of the raw value for
			 * entry hash calculation. This is to avoid walking a
			 * potentially long value buffer again.
			 */
			crc32c_hash = cpu_to_le32(
				ext4_xattr_inode_get_hash(new_ea_inode));
			hash = ext4_xattr_hash_entry(here->e_name,
						     here->e_name_len,
						     &crc32c_hash, 1);
		} else if (is_block) {
			__le32 *value = s->base + le16_to_cpu(
							here->e_value_offs);

			hash = ext4_xattr_hash_entry(here->e_name,
						     here->e_name_len, value,
						     new_size >> 2);
		}
		here->e_hash = hash;
	}

	if (is_block)
		ext4_xattr_rehash((struct ext4_xattr_header *)s->base);

	ret = 0;
out:
	iput(old_ea_inode);
	iput(new_ea_inode);
	return ret;
}
1790
1791struct ext4_xattr_block_find {
1792 struct ext4_xattr_search s;
1793 struct buffer_head *bh;
1794};
1795
1796static int
1797ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
1798 struct ext4_xattr_block_find *bs)
1799{
1800 struct super_block *sb = inode->i_sb;
1801 int error;
1802
1803 ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
1804 i->name_index, i->name, i->value, (long)i->value_len);
1805
1806 if (EXT4_I(inode)->i_file_acl) {
1807 /* The inode already has an extended attribute block. */
David Brazdil0f672f62019-12-10 10:32:29 +00001808 bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
Olivier Deprez0e641232021-09-23 10:07:05 +02001809 if (IS_ERR(bs->bh)) {
1810 error = PTR_ERR(bs->bh);
1811 bs->bh = NULL;
1812 return error;
1813 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001814 ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
1815 atomic_read(&(bs->bh->b_count)),
1816 le32_to_cpu(BHDR(bs->bh)->h_refcount));
1817 error = ext4_xattr_check_block(inode, bs->bh);
1818 if (error)
David Brazdil0f672f62019-12-10 10:32:29 +00001819 return error;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001820 /* Find the named attribute. */
1821 bs->s.base = BHDR(bs->bh);
1822 bs->s.first = BFIRST(bs->bh);
1823 bs->s.end = bs->bh->b_data + bs->bh->b_size;
1824 bs->s.here = bs->s.first;
1825 error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
1826 i->name_index, i->name, 1);
1827 if (error && error != -ENODATA)
David Brazdil0f672f62019-12-10 10:32:29 +00001828 return error;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001829 bs->s.not_found = error;
1830 }
David Brazdil0f672f62019-12-10 10:32:29 +00001831 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001832}
1833
1834static int
1835ext4_xattr_block_set(handle_t *handle, struct inode *inode,
1836 struct ext4_xattr_info *i,
1837 struct ext4_xattr_block_find *bs)
1838{
1839 struct super_block *sb = inode->i_sb;
1840 struct buffer_head *new_bh = NULL;
1841 struct ext4_xattr_search s_copy = bs->s;
1842 struct ext4_xattr_search *s = &s_copy;
1843 struct mb_cache_entry *ce = NULL;
1844 int error = 0;
1845 struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
1846 struct inode *ea_inode = NULL, *tmp_inode;
1847 size_t old_ea_inode_quota = 0;
1848 unsigned int ea_ino;
1849
1850
1851#define header(x) ((struct ext4_xattr_header *)(x))
1852
1853 if (s->base) {
1854 BUFFER_TRACE(bs->bh, "get_write_access");
1855 error = ext4_journal_get_write_access(handle, bs->bh);
1856 if (error)
1857 goto cleanup;
1858 lock_buffer(bs->bh);
1859
1860 if (header(s->base)->h_refcount == cpu_to_le32(1)) {
1861 __u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);
1862
1863 /*
1864 * This must happen under buffer lock for
1865 * ext4_xattr_block_set() to reliably detect modified
1866 * block
1867 */
1868 if (ea_block_cache)
1869 mb_cache_entry_delete(ea_block_cache, hash,
1870 bs->bh->b_blocknr);
1871 ea_bdebug(bs->bh, "modifying in-place");
1872 error = ext4_xattr_set_entry(i, s, handle, inode,
1873 true /* is_block */);
1874 ext4_xattr_block_csum_set(inode, bs->bh);
1875 unlock_buffer(bs->bh);
1876 if (error == -EFSCORRUPTED)
1877 goto bad_block;
1878 if (!error)
1879 error = ext4_handle_dirty_metadata(handle,
1880 inode,
1881 bs->bh);
1882 if (error)
1883 goto cleanup;
1884 goto inserted;
1885 } else {
1886 int offset = (char *)s->here - bs->bh->b_data;
1887
1888 unlock_buffer(bs->bh);
1889 ea_bdebug(bs->bh, "cloning");
1890 s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
1891 error = -ENOMEM;
1892 if (s->base == NULL)
1893 goto cleanup;
1894 memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
1895 s->first = ENTRY(header(s->base)+1);
1896 header(s->base)->h_refcount = cpu_to_le32(1);
1897 s->here = ENTRY(s->base + offset);
1898 s->end = s->base + bs->bh->b_size;
1899
1900			/*
1901			 * If the existing entry points to an xattr inode, we
1902			 * must prevent ext4_xattr_set_entry() from decrementing
1903			 * its ref count, because that reference belongs to the
1904			 * original block. In this case, make the entry look
1905			 * like it has an empty value.
1906			 */
1907 if (!s->not_found && s->here->e_value_inum) {
1908 ea_ino = le32_to_cpu(s->here->e_value_inum);
1909 error = ext4_xattr_inode_iget(inode, ea_ino,
1910 le32_to_cpu(s->here->e_hash),
1911 &tmp_inode);
1912 if (error)
1913 goto cleanup;
1914
1915 if (!ext4_test_inode_state(tmp_inode,
1916 EXT4_STATE_LUSTRE_EA_INODE)) {
1917 /*
1918 * Defer quota free call for previous
1919 * inode until success is guaranteed.
1920 */
1921 old_ea_inode_quota = le32_to_cpu(
1922 s->here->e_value_size);
1923 }
1924 iput(tmp_inode);
1925
1926 s->here->e_value_inum = 0;
1927 s->here->e_value_size = 0;
1928 }
1929 }
1930 } else {
1931 /* Allocate a buffer where we construct the new block. */
1932 s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
1933 /* assert(header == s->base) */
1934 error = -ENOMEM;
1935 if (s->base == NULL)
1936 goto cleanup;
1937 header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
1938 header(s->base)->h_blocks = cpu_to_le32(1);
1939 header(s->base)->h_refcount = cpu_to_le32(1);
1940 s->first = ENTRY(header(s->base)+1);
1941 s->here = ENTRY(header(s->base)+1);
1942 s->end = s->base + sb->s_blocksize;
1943 }
1944
1945 error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
1946 if (error == -EFSCORRUPTED)
1947 goto bad_block;
1948 if (error)
1949 goto cleanup;
1950
1951 if (i->value && s->here->e_value_inum) {
1952 /*
1953 * A ref count on ea_inode has been taken as part of the call to
1954 * ext4_xattr_set_entry() above. We would like to drop this
1955 * extra ref but we have to wait until the xattr block is
1956 * initialized and has its own ref count on the ea_inode.
1957 */
1958 ea_ino = le32_to_cpu(s->here->e_value_inum);
1959 error = ext4_xattr_inode_iget(inode, ea_ino,
1960 le32_to_cpu(s->here->e_hash),
1961 &ea_inode);
1962 if (error) {
1963 ea_inode = NULL;
1964 goto cleanup;
1965 }
1966 }
1967
1968inserted:
1969 if (!IS_LAST_ENTRY(s->first)) {
1970 new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
1971 &ce);
1972 if (new_bh) {
1973 /* We found an identical block in the cache. */
1974 if (new_bh == bs->bh)
1975 ea_bdebug(new_bh, "keeping");
1976 else {
1977 u32 ref;
1978
1979 WARN_ON_ONCE(dquot_initialize_needed(inode));
1980
1981			/* The old block is released after updating
1982			 * the inode. */
1983 error = dquot_alloc_block(inode,
1984 EXT4_C2B(EXT4_SB(sb), 1));
1985 if (error)
1986 goto cleanup;
1987 BUFFER_TRACE(new_bh, "get_write_access");
1988 error = ext4_journal_get_write_access(handle,
1989 new_bh);
1990 if (error)
1991 goto cleanup_dquot;
1992 lock_buffer(new_bh);
1993				/*
1994				 * We have to be careful about races with
1995				 * freeing, rehashing or adding references to
1996				 * the xattr block. Once we hold the buffer
1997				 * lock, the xattr block's state is stable, so
1998				 * we can check whether the block got freed or
1999				 * rehashed. Since we unhash the mbcache entry
2000				 * under the buffer lock when freeing or
2001				 * rehashing an xattr block, checking whether
2002				 * the entry is still hashed is reliable. The
2003				 * same rules hold for e_reusable handling.
2004				 */
2005 if (hlist_bl_unhashed(&ce->e_hash_list) ||
2006 !ce->e_reusable) {
2007 /*
2008 * Undo everything and check mbcache
2009 * again.
2010 */
2011 unlock_buffer(new_bh);
2012 dquot_free_block(inode,
2013 EXT4_C2B(EXT4_SB(sb),
2014 1));
2015 brelse(new_bh);
2016 mb_cache_entry_put(ea_block_cache, ce);
2017 ce = NULL;
2018 new_bh = NULL;
2019 goto inserted;
2020 }
2021 ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
2022 BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
2023 if (ref >= EXT4_XATTR_REFCOUNT_MAX)
2024 ce->e_reusable = 0;
2025 ea_bdebug(new_bh, "reusing; refcount now=%d",
2026 ref);
2027 ext4_xattr_block_csum_set(inode, new_bh);
2028 unlock_buffer(new_bh);
2029 error = ext4_handle_dirty_metadata(handle,
2030 inode,
2031 new_bh);
2032 if (error)
2033 goto cleanup_dquot;
2034 }
2035 mb_cache_entry_touch(ea_block_cache, ce);
2036 mb_cache_entry_put(ea_block_cache, ce);
2037 ce = NULL;
2038 } else if (bs->bh && s->base == bs->bh->b_data) {
2039 /* We were modifying this block in-place. */
2040 ea_bdebug(bs->bh, "keeping this block");
2041 ext4_xattr_block_cache_insert(ea_block_cache, bs->bh);
2042 new_bh = bs->bh;
2043 get_bh(new_bh);
2044 } else {
2045 /* We need to allocate a new block */
2046 ext4_fsblk_t goal, block;
2047
2048 WARN_ON_ONCE(dquot_initialize_needed(inode));
2049
2050 goal = ext4_group_first_block_no(sb,
2051 EXT4_I(inode)->i_block_group);
2052
2053 /* non-extent files can't have physical blocks past 2^32 */
2054 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
2055 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
2056
2057 block = ext4_new_meta_blocks(handle, inode, goal, 0,
2058 NULL, &error);
2059 if (error)
2060 goto cleanup;
2061
2062 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
2063 BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
2064
2065 ea_idebug(inode, "creating block %llu",
2066 (unsigned long long)block);
2067
2068 new_bh = sb_getblk(sb, block);
2069 if (unlikely(!new_bh)) {
2070 error = -ENOMEM;
2071getblk_failed:
2072 ext4_free_blocks(handle, inode, NULL, block, 1,
2073 EXT4_FREE_BLOCKS_METADATA);
2074 goto cleanup;
2075 }
2076 error = ext4_xattr_inode_inc_ref_all(handle, inode,
2077 ENTRY(header(s->base)+1));
2078 if (error)
2079 goto getblk_failed;
2080 if (ea_inode) {
2081 /* Drop the extra ref on ea_inode. */
2082 error = ext4_xattr_inode_dec_ref(handle,
2083 ea_inode);
2084 if (error)
2085 ext4_warning_inode(ea_inode,
2086 "dec ref error=%d",
2087 error);
2088 iput(ea_inode);
2089 ea_inode = NULL;
2090 }
2091
2092 lock_buffer(new_bh);
2093 error = ext4_journal_get_create_access(handle, new_bh);
2094 if (error) {
2095 unlock_buffer(new_bh);
2096 error = -EIO;
2097 goto getblk_failed;
2098 }
2099 memcpy(new_bh->b_data, s->base, new_bh->b_size);
2100 ext4_xattr_block_csum_set(inode, new_bh);
2101 set_buffer_uptodate(new_bh);
2102 unlock_buffer(new_bh);
2103 ext4_xattr_block_cache_insert(ea_block_cache, new_bh);
2104 error = ext4_handle_dirty_metadata(handle, inode,
2105 new_bh);
2106 if (error)
2107 goto cleanup;
2108 }
2109 }
2110
2111 if (old_ea_inode_quota)
2112 ext4_xattr_inode_free_quota(inode, NULL, old_ea_inode_quota);
2113
2114 /* Update the inode. */
2115 EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
2116
2117 /* Drop the previous xattr block. */
2118 if (bs->bh && bs->bh != new_bh) {
2119 struct ext4_xattr_inode_array *ea_inode_array = NULL;
2120
2121 ext4_xattr_release_block(handle, inode, bs->bh,
2122 &ea_inode_array,
2123 0 /* extra_credits */);
2124 ext4_xattr_inode_array_free(ea_inode_array);
2125 }
2126 error = 0;
2127
2128cleanup:
2129 if (ea_inode) {
2130 int error2;
2131
2132 error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
2133 if (error2)
2134 ext4_warning_inode(ea_inode, "dec ref error=%d",
2135 error2);
2136
2137 /* If there was an error, revert the quota charge. */
2138 if (error)
2139 ext4_xattr_inode_free_quota(inode, ea_inode,
2140 i_size_read(ea_inode));
2141 iput(ea_inode);
2142 }
2143 if (ce)
2144 mb_cache_entry_put(ea_block_cache, ce);
2145 brelse(new_bh);
2146 if (!(bs->bh && s->base == bs->bh->b_data))
2147 kfree(s->base);
2148
2149 return error;
2150
2151cleanup_dquot:
2152 dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
2153 goto cleanup;
2154
2155bad_block:
2156 EXT4_ERROR_INODE(inode, "bad block %llu",
2157 EXT4_I(inode)->i_file_acl);
2158 goto cleanup;
2159
2160#undef header
2161}
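
/*
 * The "inserted" logic above resolves to one of three outcomes: an
 * identical block found in the mbcache is reused with its h_refcount
 * bumped, a block exclusively owned by this inode is kept and
 * re-inserted into the cache, or a fresh block is allocated and
 * populated from s->base.
 */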
2162
2163int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
2164 struct ext4_xattr_ibody_find *is)
2165{
2166 struct ext4_xattr_ibody_header *header;
2167 struct ext4_inode *raw_inode;
2168 int error;
2169
Olivier Deprez92d4c212022-12-06 15:05:30 +01002170 if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002171 return 0;
Olivier Deprez92d4c212022-12-06 15:05:30 +01002172
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002173 raw_inode = ext4_raw_inode(&is->iloc);
2174 header = IHDR(inode, raw_inode);
2175 is->s.base = is->s.first = IFIRST(header);
2176 is->s.here = is->s.first;
2177 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
2178 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
2179 error = xattr_check_inode(inode, header, is->s.end);
2180 if (error)
2181 return error;
2182 /* Find the named attribute. */
2183 error = xattr_find_entry(inode, &is->s.here, is->s.end,
2184 i->name_index, i->name, 0);
2185 if (error && error != -ENODATA)
2186 return error;
2187 is->s.not_found = error;
2188 }
2189 return 0;
2190}
2191
2192int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
2193 struct ext4_xattr_info *i,
2194 struct ext4_xattr_ibody_find *is)
2195{
2196 struct ext4_xattr_ibody_header *header;
2197 struct ext4_xattr_search *s = &is->s;
2198 int error;
2199
Olivier Deprez92d4c212022-12-06 15:05:30 +01002200 if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002201 return -ENOSPC;
Olivier Deprez92d4c212022-12-06 15:05:30 +01002202
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002203 error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
2204 if (error)
2205 return error;
2206 header = IHDR(inode, ext4_raw_inode(&is->iloc));
2207 if (!IS_LAST_ENTRY(s->first)) {
2208 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
2209 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
2210 } else {
2211 header->h_magic = cpu_to_le32(0);
2212 ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
2213 }
2214 return 0;
2215}
2216
2217static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
2218 struct ext4_xattr_info *i,
2219 struct ext4_xattr_ibody_find *is)
2220{
2221 struct ext4_xattr_ibody_header *header;
2222 struct ext4_xattr_search *s = &is->s;
2223 int error;
2224
2225 if (EXT4_I(inode)->i_extra_isize == 0)
2226 return -ENOSPC;
2227 error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
2228 if (error)
2229 return error;
2230 header = IHDR(inode, ext4_raw_inode(&is->iloc));
2231 if (!IS_LAST_ENTRY(s->first)) {
2232 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
2233 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
2234 } else {
2235 header->h_magic = cpu_to_le32(0);
2236 ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
2237 }
2238 return 0;
2239}
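
/*
 * Note: this differs from ext4_xattr_ibody_inline_set() above only in
 * its space check: the inline variant requires the inode to pass
 * EXT4_INODE_HAS_XATTR_SPACE(), while this one merely requires a
 * nonzero i_extra_isize.
 */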
2240
2241static int ext4_xattr_value_same(struct ext4_xattr_search *s,
2242 struct ext4_xattr_info *i)
2243{
2244 void *value;
2245
2246 /* When e_value_inum is set the value is stored externally. */
2247 if (s->here->e_value_inum)
2248 return 0;
2249 if (le32_to_cpu(s->here->e_value_size) != i->value_len)
2250 return 0;
2251 value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs);
2252 return !memcmp(value, i->value, i->value_len);
2253}
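
/*
 * Note: a value stored in an EA inode always compares as "different"
 * here, so the unchanged-value fast path in ext4_xattr_set_handle()
 * never triggers for ea_inode-backed attributes.
 */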
2254
2255static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
2256{
2257 struct buffer_head *bh;
2258 int error;
2259
2260 if (!EXT4_I(inode)->i_file_acl)
2261 return NULL;
David Brazdil0f672f62019-12-10 10:32:29 +00002262 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2263 if (IS_ERR(bh))
2264 return bh;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002265 error = ext4_xattr_check_block(inode, bh);
2266 if (error) {
2267 brelse(bh);
2268 return ERR_PTR(error);
2269 }
2270 return bh;
2271}
2272
2273/*
2274 * ext4_xattr_set_handle()
2275 *
2276 * Create, replace or remove an extended attribute for this inode.  Value
2277 * is NULL to remove an existing extended attribute, and non-NULL to
2278 * either replace an existing extended attribute or create a new one. The
2279 * flags XATTR_REPLACE and XATTR_CREATE specify that an extended
2280 * attribute must or must not already exist prior to the call,
2281 * respectively.
2282 *
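 * Example (illustrative only; assumes a handle with sufficient
 * credits, error handling omitted):
 *
 *	err = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_USER,
 *				    "origin", "fetched", 7, XATTR_CREATE);
 *
 * This fails with -EEXIST if "user.origin" already exists; with
 * XATTR_REPLACE instead, it fails with -ENODATA if it does not.
 *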
2283 * Returns 0, or a negative error number on failure.
2284 */
2285int
2286ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
2287 const char *name, const void *value, size_t value_len,
2288 int flags)
2289{
2290 struct ext4_xattr_info i = {
2291 .name_index = name_index,
2292 .name = name,
2293 .value = value,
2294 .value_len = value_len,
2295 .in_inode = 0,
2296 };
2297 struct ext4_xattr_ibody_find is = {
2298 .s = { .not_found = -ENODATA, },
2299 };
2300 struct ext4_xattr_block_find bs = {
2301 .s = { .not_found = -ENODATA, },
2302 };
2303 int no_expand;
2304 int error;
2305
2306 if (!name)
2307 return -EINVAL;
2308 if (strlen(name) > 255)
2309 return -ERANGE;
2310
2311 ext4_write_lock_xattr(inode, &no_expand);
2312
2313 /* Check journal credits under write lock. */
2314 if (ext4_handle_valid(handle)) {
2315 struct buffer_head *bh;
2316 int credits;
2317
2318 bh = ext4_xattr_get_block(inode);
2319 if (IS_ERR(bh)) {
2320 error = PTR_ERR(bh);
2321 goto cleanup;
2322 }
2323
2324 credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
2325 value_len,
2326 flags & XATTR_CREATE);
2327 brelse(bh);
2328
Olivier Deprez157378f2022-04-04 15:47:50 +02002329 if (jbd2_handle_buffer_credits(handle) < credits) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002330 error = -ENOSPC;
2331 goto cleanup;
2332 }
Olivier Deprez0e641232021-09-23 10:07:05 +02002333 WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002334 }
2335
2336 error = ext4_reserve_inode_write(handle, inode, &is.iloc);
2337 if (error)
2338 goto cleanup;
2339
2340 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
2341 struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
2342 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2343 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
2344 }
2345
2346 error = ext4_xattr_ibody_find(inode, &i, &is);
2347 if (error)
2348 goto cleanup;
2349 if (is.s.not_found)
2350 error = ext4_xattr_block_find(inode, &i, &bs);
2351 if (error)
2352 goto cleanup;
2353 if (is.s.not_found && bs.s.not_found) {
2354 error = -ENODATA;
2355 if (flags & XATTR_REPLACE)
2356 goto cleanup;
2357 error = 0;
2358 if (!value)
2359 goto cleanup;
2360 } else {
2361 error = -EEXIST;
2362 if (flags & XATTR_CREATE)
2363 goto cleanup;
2364 }
2365
2366 if (!value) {
2367 if (!is.s.not_found)
2368 error = ext4_xattr_ibody_set(handle, inode, &i, &is);
2369 else if (!bs.s.not_found)
2370 error = ext4_xattr_block_set(handle, inode, &i, &bs);
2371 } else {
2372 error = 0;
2373 /* Xattr value did not change? Save us some work and bail out */
2374 if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i))
2375 goto cleanup;
2376 if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))
2377 goto cleanup;
2378
2379 if (ext4_has_feature_ea_inode(inode->i_sb) &&
2380 (EXT4_XATTR_SIZE(i.value_len) >
2381 EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
2382 i.in_inode = 1;
2383retry_inode:
2384 error = ext4_xattr_ibody_set(handle, inode, &i, &is);
2385 if (!error && !bs.s.not_found) {
2386 i.value = NULL;
2387 error = ext4_xattr_block_set(handle, inode, &i, &bs);
2388 } else if (error == -ENOSPC) {
2389 if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
2390 brelse(bs.bh);
2391 bs.bh = NULL;
2392 error = ext4_xattr_block_find(inode, &i, &bs);
2393 if (error)
2394 goto cleanup;
2395 }
2396 error = ext4_xattr_block_set(handle, inode, &i, &bs);
2397 if (!error && !is.s.not_found) {
2398 i.value = NULL;
2399 error = ext4_xattr_ibody_set(handle, inode, &i,
2400 &is);
2401 } else if (error == -ENOSPC) {
2402				/*
2403				 * The xattr does not fit in the block; store
2404				 * it in an external EA inode if possible.
2405				 */
2406 if (ext4_has_feature_ea_inode(inode->i_sb) &&
Olivier Deprez0e641232021-09-23 10:07:05 +02002407 i.value_len && !i.in_inode) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002408 i.in_inode = 1;
2409 goto retry_inode;
2410 }
2411 }
2412 }
2413 }
2414 if (!error) {
2415 ext4_xattr_update_super_block(handle, inode->i_sb);
2416 inode->i_ctime = current_time(inode);
2417 if (!value)
2418 no_expand = 0;
2419 error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
2420 /*
2421 * The bh is consumed by ext4_mark_iloc_dirty, even with
2422 * error != 0.
2423 */
2424 is.iloc.bh = NULL;
2425 if (IS_SYNC(inode))
2426 ext4_handle_sync(handle);
2427 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002428 ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002429
2430cleanup:
2431 brelse(is.iloc.bh);
2432 brelse(bs.bh);
2433 ext4_write_unlock_xattr(inode, &no_expand);
2434 return error;
2435}
2436
2437int ext4_xattr_set_credits(struct inode *inode, size_t value_len,
2438 bool is_create, int *credits)
2439{
2440 struct buffer_head *bh;
2441 int err;
2442
2443 *credits = 0;
2444
2445 if (!EXT4_SB(inode->i_sb)->s_journal)
2446 return 0;
2447
2448 down_read(&EXT4_I(inode)->xattr_sem);
2449
2450 bh = ext4_xattr_get_block(inode);
2451 if (IS_ERR(bh)) {
2452 err = PTR_ERR(bh);
2453 } else {
2454 *credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
2455 value_len, is_create);
2456 brelse(bh);
2457 err = 0;
2458 }
2459
2460 up_read(&EXT4_I(inode)->xattr_sem);
2461 return err;
2462}
2463
2464/*
2465 * ext4_xattr_set()
2466 *
2467 * Like ext4_xattr_set_handle, but starts from an inode.  This extended
2468 * attribute modification is a filesystem transaction by itself.
2469 *
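 * Example (illustrative): the generic xattr handlers funnel into this
 * function, so a userspace call such as
 *
 *	setxattr("/mnt/file", "user.comment", "hi", 2, 0);
 *
 * arrives here with name_index EXT4_XATTR_INDEX_USER and name
 * "comment", wrapped in a transaction started below.
 *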
2470 * Returns 0, or a negative error number on failure.
2471 */
2472int
2473ext4_xattr_set(struct inode *inode, int name_index, const char *name,
2474 const void *value, size_t value_len, int flags)
2475{
2476 handle_t *handle;
2477 struct super_block *sb = inode->i_sb;
2478 int error, retries = 0;
2479 int credits;
2480
2481 error = dquot_initialize(inode);
2482 if (error)
2483 return error;
2484
2485retry:
2486 error = ext4_xattr_set_credits(inode, value_len, flags & XATTR_CREATE,
2487 &credits);
2488 if (error)
2489 return error;
2490
2491 handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
2492 if (IS_ERR(handle)) {
2493 error = PTR_ERR(handle);
2494 } else {
2495 int error2;
2496
2497 error = ext4_xattr_set_handle(handle, inode, name_index, name,
2498 value, value_len, flags);
2499 error2 = ext4_journal_stop(handle);
2500 if (error == -ENOSPC &&
2501 ext4_should_retry_alloc(sb, &retries))
2502 goto retry;
2503 if (error == 0)
2504 error = error2;
2505 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002506 ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002507
2508 return error;
2509}
2510
2511/*
2512 * Shift the EA entries in the inode to create space for the increased
2513 * i_extra_isize.
2514 */
2515static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
2516 int value_offs_shift, void *to,
2517 void *from, size_t n)
2518{
2519 struct ext4_xattr_entry *last = entry;
2520 int new_offs;
2521
2522	/* Entries only ever shift further into the inode, so offsets get lower */
2523 BUG_ON(value_offs_shift > 0);
2524
2525 /* Adjust the value offsets of the entries */
2526 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
2527 if (!last->e_value_inum && last->e_value_size) {
2528 new_offs = le16_to_cpu(last->e_value_offs) +
2529 value_offs_shift;
2530 last->e_value_offs = cpu_to_le16(new_offs);
2531 }
2532 }
2533 /* Shift the entries by n bytes */
2534 memmove(to, from, n);
2535}
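
/*
 * Worked example (illustrative): growing i_extra_isize from 32 to 48
 * bytes calls ext4_xattr_shift_entries() with value_offs_shift == -16,
 * so an entry whose e_value_offs was 100 is rewritten to 84, while the
 * ibody header and entry descriptors are memmove()d 16 bytes further
 * into the inode body.
 */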
2536
2537/*
2538 * Move xattr pointed to by 'entry' from inode into external xattr block
2539 */
2540static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
2541 struct ext4_inode *raw_inode,
2542 struct ext4_xattr_entry *entry)
2543{
2544 struct ext4_xattr_ibody_find *is = NULL;
2545 struct ext4_xattr_block_find *bs = NULL;
2546 char *buffer = NULL, *b_entry_name = NULL;
2547 size_t value_size = le32_to_cpu(entry->e_value_size);
2548 struct ext4_xattr_info i = {
2549 .value = NULL,
2550 .value_len = 0,
2551 .name_index = entry->e_name_index,
2552 .in_inode = !!entry->e_value_inum,
2553 };
2554 struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
2555 int error;
2556
2557 is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
2558 bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
2559 buffer = kmalloc(value_size, GFP_NOFS);
2560 b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
2561 if (!is || !bs || !buffer || !b_entry_name) {
2562 error = -ENOMEM;
2563 goto out;
2564 }
2565
2566 is->s.not_found = -ENODATA;
2567 bs->s.not_found = -ENODATA;
2568 is->iloc.bh = NULL;
2569 bs->bh = NULL;
2570
2571 /* Save the entry name and the entry value */
2572 if (entry->e_value_inum) {
2573 error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
2574 if (error)
2575 goto out;
2576 } else {
2577 size_t value_offs = le16_to_cpu(entry->e_value_offs);
2578 memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
2579 }
2580
2581 memcpy(b_entry_name, entry->e_name, entry->e_name_len);
2582 b_entry_name[entry->e_name_len] = '\0';
2583 i.name = b_entry_name;
2584
2585 error = ext4_get_inode_loc(inode, &is->iloc);
2586 if (error)
2587 goto out;
2588
2589 error = ext4_xattr_ibody_find(inode, &i, is);
2590 if (error)
2591 goto out;
2592
2593 /* Remove the chosen entry from the inode */
2594 error = ext4_xattr_ibody_set(handle, inode, &i, is);
2595 if (error)
2596 goto out;
2597
2598 i.value = buffer;
2599 i.value_len = value_size;
2600 error = ext4_xattr_block_find(inode, &i, bs);
2601 if (error)
2602 goto out;
2603
2604 /* Add entry which was removed from the inode into the block */
2605 error = ext4_xattr_block_set(handle, inode, &i, bs);
2606 if (error)
2607 goto out;
2608 error = 0;
2609out:
2610 kfree(b_entry_name);
2611 kfree(buffer);
2612 if (is)
2613 brelse(is->iloc.bh);
2614 if (bs)
2615 brelse(bs->bh);
2616 kfree(is);
2617 kfree(bs);
2618
2619 return error;
2620}
2621
2622static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
2623 struct ext4_inode *raw_inode,
2624 int isize_diff, size_t ifree,
2625 size_t bfree, int *total_ino)
2626{
2627 struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
2628 struct ext4_xattr_entry *small_entry;
2629 struct ext4_xattr_entry *entry;
2630 struct ext4_xattr_entry *last;
2631 unsigned int entry_size; /* EA entry size */
2632 unsigned int total_size; /* EA entry size + value size */
2633 unsigned int min_total_size;
2634 int error;
2635
2636 while (isize_diff > ifree) {
2637 entry = NULL;
2638 small_entry = NULL;
2639 min_total_size = ~0U;
2640 last = IFIRST(header);
2641 /* Find the entry best suited to be pushed into EA block */
2642 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
2643 /* never move system.data out of the inode */
2644 if ((last->e_name_len == 4) &&
2645 (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
2646 !memcmp(last->e_name, "data", 4))
2647 continue;
2648 total_size = EXT4_XATTR_LEN(last->e_name_len);
2649 if (!last->e_value_inum)
2650 total_size += EXT4_XATTR_SIZE(
2651 le32_to_cpu(last->e_value_size));
2652 if (total_size <= bfree &&
2653 total_size < min_total_size) {
2654 if (total_size + ifree < isize_diff) {
2655 small_entry = last;
2656 } else {
2657 entry = last;
2658 min_total_size = total_size;
2659 }
2660 }
2661 }
2662
2663 if (entry == NULL) {
2664 if (small_entry == NULL)
2665 return -ENOSPC;
2666 entry = small_entry;
2667 }
2668
2669 entry_size = EXT4_XATTR_LEN(entry->e_name_len);
2670 total_size = entry_size;
2671 if (!entry->e_value_inum)
2672 total_size += EXT4_XATTR_SIZE(
2673 le32_to_cpu(entry->e_value_size));
2674 error = ext4_xattr_move_to_block(handle, inode, raw_inode,
2675 entry);
2676 if (error)
2677 return error;
2678
2679 *total_ino -= entry_size;
2680 ifree += total_size;
2681 bfree -= total_size;
2682 }
2683
2684 return 0;
2685}
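
/*
 * Illustrative walk-through of the eviction loop above: with
 * isize_diff == 64 and ifree == 16, an entry qualifies as "entry" only
 * if its total size is at least 48 bytes. Given candidates of 32, 56
 * and 96 bytes (all <= bfree), the 56-byte entry is moved out first,
 * being the smallest one that covers the deficit on its own; only when
 * no single entry suffices is a smaller "small_entry" moved and the
 * scan repeated.
 */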
2686
2687/*
2688 * Expand an inode by new_extra_isize bytes when EAs are present.
2689 * Returns 0 on success or negative error number on failure.
2690 */
2691int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
2692 struct ext4_inode *raw_inode, handle_t *handle)
2693{
2694 struct ext4_xattr_ibody_header *header;
2695 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2696 static unsigned int mnt_count;
2697 size_t min_offs;
2698 size_t ifree, bfree;
2699 int total_ino;
2700 void *base, *end;
2701 int error = 0, tried_min_extra_isize = 0;
2702 int s_min_extra_isize = le16_to_cpu(sbi->s_es->s_min_extra_isize);
2703 int isize_diff; /* How much do we need to grow i_extra_isize */
2704
2705retry:
2706 isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
2707 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
2708 return 0;
2709
2710 header = IHDR(inode, raw_inode);
2711
2712 /*
2713	 * Check if enough free space is available in the inode to shift the
2714	 * entries ahead by isize_diff bytes.
2715 */
2716
2717 base = IFIRST(header);
2718 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
2719 min_offs = end - base;
David Brazdil0f672f62019-12-10 10:32:29 +00002720 total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002721
2722 error = xattr_check_inode(inode, header, end);
2723 if (error)
2724 goto cleanup;
2725
2726 ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
2727 if (ifree >= isize_diff)
2728 goto shift;
2729
2730 /*
2731 * Enough free space isn't available in the inode, check if
2732 * EA block can hold new_extra_isize bytes.
2733 */
2734 if (EXT4_I(inode)->i_file_acl) {
2735 struct buffer_head *bh;
2736
David Brazdil0f672f62019-12-10 10:32:29 +00002737 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2738 if (IS_ERR(bh)) {
2739 error = PTR_ERR(bh);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002740 goto cleanup;
David Brazdil0f672f62019-12-10 10:32:29 +00002741 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002742 error = ext4_xattr_check_block(inode, bh);
2743 if (error) {
2744 brelse(bh);
2745 goto cleanup;
2746 }
2747 base = BHDR(bh);
2748 end = bh->b_data + bh->b_size;
2749 min_offs = end - base;
2750 bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base,
2751 NULL);
2752 brelse(bh);
2753 if (bfree + ifree < isize_diff) {
2754 if (!tried_min_extra_isize && s_min_extra_isize) {
2755 tried_min_extra_isize++;
2756 new_extra_isize = s_min_extra_isize;
2757 goto retry;
2758 }
2759 error = -ENOSPC;
2760 goto cleanup;
2761 }
2762 } else {
2763 bfree = inode->i_sb->s_blocksize;
2764 }
2765
2766 error = ext4_xattr_make_inode_space(handle, inode, raw_inode,
2767 isize_diff, ifree, bfree,
2768 &total_ino);
2769 if (error) {
2770 if (error == -ENOSPC && !tried_min_extra_isize &&
2771 s_min_extra_isize) {
2772 tried_min_extra_isize++;
2773 new_extra_isize = s_min_extra_isize;
2774 goto retry;
2775 }
2776 goto cleanup;
2777 }
2778shift:
2779 /* Adjust the offsets and shift the remaining entries ahead */
2780 ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize
2781 - new_extra_isize, (void *)raw_inode +
2782 EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
2783 (void *)header, total_ino);
2784 EXT4_I(inode)->i_extra_isize = new_extra_isize;
2785
2786cleanup:
2787 if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
2788 ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
2789 inode->i_ino);
2790 mnt_count = le16_to_cpu(sbi->s_es->s_mnt_count);
2791 }
2792 return error;
2793}
2794
2795#define EIA_INCR 16 /* must be 2^n */
2796#define EIA_MASK (EIA_INCR - 1)
2797
2798/* Add the large xattr @inode into @ea_inode_array for deferred iput().
2799 * If @ea_inode_array is new or full, it will be grown and the old
2800 * contents copied over.
2801 */
2802static int
2803ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
2804 struct inode *inode)
2805{
2806 if (*ea_inode_array == NULL) {
2807 /*
2808		 * Start with 15 inodes, so the allocation computed by
2809		 * offsetof() below fits into a power-of-two size class.
2810 */
2811 (*ea_inode_array) =
2812 kmalloc(offsetof(struct ext4_xattr_inode_array,
2813 inodes[EIA_MASK]),
2814 GFP_NOFS);
2815 if (*ea_inode_array == NULL)
2816 return -ENOMEM;
2817 (*ea_inode_array)->count = 0;
2818 } else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) {
2819 /* expand the array once all 15 + n * 16 slots are full */
2820 struct ext4_xattr_inode_array *new_array = NULL;
2821 int count = (*ea_inode_array)->count;
2822
2823		/* allocate space for EIA_INCR more inode pointers */
2824 new_array = kmalloc(
2825 offsetof(struct ext4_xattr_inode_array,
2826 inodes[count + EIA_INCR]),
2827 GFP_NOFS);
2828 if (new_array == NULL)
2829 return -ENOMEM;
2830 memcpy(new_array, *ea_inode_array,
2831 offsetof(struct ext4_xattr_inode_array, inodes[count]));
2832 kfree(*ea_inode_array);
2833 *ea_inode_array = new_array;
2834 }
2835 (*ea_inode_array)->inodes[(*ea_inode_array)->count++] = inode;
2836 return 0;
2837}
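
/*
 * Illustrative growth pattern: capacity starts at 15 pointers and the
 * array is regrown whenever count reaches 15, 31, 47, ...; assuming a
 * padded 8-byte header and 8-byte pointers (64-bit), the first two
 * allocations are exactly 128 and 256 bytes.
 */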
2838
2839/*
2840 * ext4_xattr_delete_inode()
2841 *
2842 * Free the extended attribute resources associated with this inode.
2843 * Traverse all entries and drop the reference on any xattr inodes they
2844 * point to. This is called immediately before an inode is freed, with
2845 * exclusive access to the inode. Deleting an orphan inode also releases
2846 * its references on the xattr block and xattr inodes.
2847 */
2848int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
2849 struct ext4_xattr_inode_array **ea_inode_array,
2850 int extra_credits)
2851{
2852 struct buffer_head *bh = NULL;
2853 struct ext4_xattr_ibody_header *header;
2854 struct ext4_iloc iloc = { .bh = NULL };
2855 struct ext4_xattr_entry *entry;
2856 struct inode *ea_inode;
2857 int error;
2858
Olivier Deprez157378f2022-04-04 15:47:50 +02002859 error = ext4_journal_ensure_credits(handle, extra_credits,
2860 ext4_free_metadata_revoke_credits(inode->i_sb, 1));
2861 if (error < 0) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002862 EXT4_ERROR_INODE(inode, "ensure credits (error %d)", error);
2863 goto cleanup;
2864 }
2865
2866 if (ext4_has_feature_ea_inode(inode->i_sb) &&
2867 ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
2868
2869 error = ext4_get_inode_loc(inode, &iloc);
2870 if (error) {
2871 EXT4_ERROR_INODE(inode, "inode loc (error %d)", error);
2872 goto cleanup;
2873 }
2874
2875 error = ext4_journal_get_write_access(handle, iloc.bh);
2876 if (error) {
2877 EXT4_ERROR_INODE(inode, "write access (error %d)",
2878 error);
2879 goto cleanup;
2880 }
2881
2882 header = IHDR(inode, ext4_raw_inode(&iloc));
2883 if (header->h_magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2884 ext4_xattr_inode_dec_ref_all(handle, inode, iloc.bh,
2885 IFIRST(header),
2886 false /* block_csum */,
2887 ea_inode_array,
2888 extra_credits,
2889 false /* skip_quota */);
2890 }
2891
2892 if (EXT4_I(inode)->i_file_acl) {
David Brazdil0f672f62019-12-10 10:32:29 +00002893 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2894 if (IS_ERR(bh)) {
2895 error = PTR_ERR(bh);
Olivier Deprez157378f2022-04-04 15:47:50 +02002896 if (error == -EIO) {
2897 EXT4_ERROR_INODE_ERR(inode, EIO,
2898 "block %llu read error",
2899 EXT4_I(inode)->i_file_acl);
2900 }
David Brazdil0f672f62019-12-10 10:32:29 +00002901 bh = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002902 goto cleanup;
2903 }
2904 error = ext4_xattr_check_block(inode, bh);
2905 if (error)
2906 goto cleanup;
2907
2908 if (ext4_has_feature_ea_inode(inode->i_sb)) {
2909 for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
2910 entry = EXT4_XATTR_NEXT(entry)) {
2911 if (!entry->e_value_inum)
2912 continue;
2913 error = ext4_xattr_inode_iget(inode,
2914 le32_to_cpu(entry->e_value_inum),
2915 le32_to_cpu(entry->e_hash),
2916 &ea_inode);
2917 if (error)
2918 continue;
2919 ext4_xattr_inode_free_quota(inode, ea_inode,
2920 le32_to_cpu(entry->e_value_size));
2921 iput(ea_inode);
2922 }
2923
2924 }
2925
2926 ext4_xattr_release_block(handle, inode, bh, ea_inode_array,
2927 extra_credits);
2928 /*
2929 * Update i_file_acl value in the same transaction that releases
2930 * block.
2931 */
2932 EXT4_I(inode)->i_file_acl = 0;
2933 error = ext4_mark_inode_dirty(handle, inode);
2934 if (error) {
2935 EXT4_ERROR_INODE(inode, "mark inode dirty (error %d)",
2936 error);
2937 goto cleanup;
2938 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002939 ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002940 }
2941 error = 0;
2942cleanup:
2943 brelse(iloc.bh);
2944 brelse(bh);
2945 return error;
2946}
2947
2948void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array)
2949{
2950 int idx;
2951
2952 if (ea_inode_array == NULL)
2953 return;
2954
2955 for (idx = 0; idx < ea_inode_array->count; ++idx)
2956 iput(ea_inode_array->inodes[idx]);
2957 kfree(ea_inode_array);
2958}
2959
2960/*
2961 * ext4_xattr_block_cache_insert()
2962 *
2963 * Create a new entry in the extended attribute block cache, and insert
2964 * it unless such an entry is already in the cache.
2965 *
2966 * Returns 0, or a negative error number on failure.
2967 */
2968static void
2969ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
2970 struct buffer_head *bh)
2971{
2972 struct ext4_xattr_header *header = BHDR(bh);
2973 __u32 hash = le32_to_cpu(header->h_hash);
2974 int reusable = le32_to_cpu(header->h_refcount) <
2975 EXT4_XATTR_REFCOUNT_MAX;
2976 int error;
2977
2978 if (!ea_block_cache)
2979 return;
2980 error = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash,
2981 bh->b_blocknr, reusable);
2982 if (error) {
2983 if (error == -EBUSY)
2984 ea_bdebug(bh, "already in cache");
2985 } else
2986 ea_bdebug(bh, "inserting [%x]", (int)hash);
2987}
2988
2989/*
2990 * ext4_xattr_cmp()
2991 *
2992 * Compare two extended attribute blocks for equality.
2993 *
2994 * Returns 0 if the blocks are equal and 1 if they differ; this
2995 * comparison itself never returns a negative error number.
2996 */
2997static int
2998ext4_xattr_cmp(struct ext4_xattr_header *header1,
2999 struct ext4_xattr_header *header2)
3000{
3001 struct ext4_xattr_entry *entry1, *entry2;
3002
3003 entry1 = ENTRY(header1+1);
3004 entry2 = ENTRY(header2+1);
3005 while (!IS_LAST_ENTRY(entry1)) {
3006 if (IS_LAST_ENTRY(entry2))
3007 return 1;
3008 if (entry1->e_hash != entry2->e_hash ||
3009 entry1->e_name_index != entry2->e_name_index ||
3010 entry1->e_name_len != entry2->e_name_len ||
3011 entry1->e_value_size != entry2->e_value_size ||
3012 entry1->e_value_inum != entry2->e_value_inum ||
3013 memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
3014 return 1;
3015 if (!entry1->e_value_inum &&
3016 memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
3017 (char *)header2 + le16_to_cpu(entry2->e_value_offs),
3018 le32_to_cpu(entry1->e_value_size)))
3019 return 1;
3020
3021 entry1 = EXT4_XATTR_NEXT(entry1);
3022 entry2 = EXT4_XATTR_NEXT(entry2);
3023 }
3024 if (!IS_LAST_ENTRY(entry2))
3025 return 1;
3026 return 0;
3027}
3028
3029/*
3030 * ext4_xattr_block_cache_find()
3031 *
3032 * Find an identical extended attribute block.
3033 *
3034 * Returns a pointer to the block found, or NULL if such a block was
3035 * not found or an error occurred.
3036 */
3037static struct buffer_head *
3038ext4_xattr_block_cache_find(struct inode *inode,
3039 struct ext4_xattr_header *header,
3040 struct mb_cache_entry **pce)
3041{
3042 __u32 hash = le32_to_cpu(header->h_hash);
3043 struct mb_cache_entry *ce;
3044 struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
3045
3046 if (!ea_block_cache)
3047 return NULL;
3048 if (!header->h_hash)
3049 return NULL; /* never share */
3050 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
3051 ce = mb_cache_entry_find_first(ea_block_cache, hash);
3052 while (ce) {
3053 struct buffer_head *bh;
3054
David Brazdil0f672f62019-12-10 10:32:29 +00003055 bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
3056 if (IS_ERR(bh)) {
3057 if (PTR_ERR(bh) == -ENOMEM)
3058 return NULL;
3059 bh = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003060 EXT4_ERROR_INODE(inode, "block %lu read error",
3061 (unsigned long)ce->e_value);
3062 } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
3063 *pce = ce;
3064 return bh;
3065 }
3066 brelse(bh);
3067 ce = mb_cache_entry_find_next(ea_block_cache, ce);
3068 }
3069 return NULL;
3070}
3071
3072#define NAME_HASH_SHIFT 5
3073#define VALUE_HASH_SHIFT 16
3074
3075/*
3076 * ext4_xattr_hash_entry()
3077 *
3078 * Compute the hash of an extended attribute.
3079 */
3080static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
3081 size_t value_count)
3082{
3083 __u32 hash = 0;
3084
3085 while (name_len--) {
3086 hash = (hash << NAME_HASH_SHIFT) ^
3087 (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
3088 *name++;
3089 }
3090 while (value_count--) {
3091 hash = (hash << VALUE_HASH_SHIFT) ^
3092 (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
3093 le32_to_cpu(*value++);
3094 }
3095 return cpu_to_le32(hash);
3096}
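
/*
 * Worked example (illustrative): hashing the two-byte name "ab" with an
 * empty value proceeds as
 *
 *	hash = (0x00 << 5) ^ (0x00 >> 27) ^ 'a'	== 0x61
 *	hash = (0x61 << 5) ^ (0x61 >> 27) ^ 'b'	== 0xc42
 *
 * so e_hash would be stored as cpu_to_le32(0xc42).
 */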
3097
3098#undef NAME_HASH_SHIFT
3099#undef VALUE_HASH_SHIFT
3100
3101#define BLOCK_HASH_SHIFT 16
3102
3103/*
3104 * ext4_xattr_rehash()
3105 *
3106 * Re-compute the extended attribute hash value after an entry has changed.
3107 */
3108static void ext4_xattr_rehash(struct ext4_xattr_header *header)
3109{
3110 struct ext4_xattr_entry *here;
3111 __u32 hash = 0;
3112
3113 here = ENTRY(header+1);
3114 while (!IS_LAST_ENTRY(here)) {
3115 if (!here->e_hash) {
3116 /* Block is not shared if an entry's hash value == 0 */
3117 hash = 0;
3118 break;
3119 }
3120 hash = (hash << BLOCK_HASH_SHIFT) ^
3121 (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
3122 le32_to_cpu(here->e_hash);
3123 here = EXT4_XATTR_NEXT(here);
3124 }
3125 header->h_hash = cpu_to_le32(hash);
3126}
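
/*
 * Note: a single entry with e_hash == 0 forces h_hash to 0, and
 * ext4_xattr_block_cache_find() treats a zero h_hash as "never share",
 * which keeps such blocks out of the mbcache entirely.
 */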
3127
3128#undef BLOCK_HASH_SHIFT
3129
3130#define HASH_BUCKET_BITS 10
3131
3132struct mb_cache *
3133ext4_xattr_create_cache(void)
3134{
3135 return mb_cache_create(HASH_BUCKET_BITS);
3136}
3137
3138void ext4_xattr_destroy_cache(struct mb_cache *cache)
3139{
3140 if (cache)
3141 mb_cache_destroy(cache);
3142}
3143