// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../free-space-cache.h"
#include "../free-space-tree.h"
#include "../transaction.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../qgroup.h"
#include "../block-group.h"

static struct vfsmount *test_mnt = NULL;

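/*
 * Messages corresponding to the TEST_ALLOC_* error codes, used when
 * allocating a dummy test object fails.
 */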
const char *test_error[] = {
	[TEST_ALLOC_FS_INFO] = "cannot allocate fs_info",
	[TEST_ALLOC_ROOT] = "cannot allocate root",
	[TEST_ALLOC_EXTENT_BUFFER] = "cannot allocate extent buffer",
	[TEST_ALLOC_PATH] = "cannot allocate path",
	[TEST_ALLOC_INODE] = "cannot allocate inode",
	[TEST_ALLOC_BLOCK_GROUP] = "cannot allocate block group",
	[TEST_ALLOC_EXTENT_MAP] = "cannot allocate extent map",
};

static const struct super_operations btrfs_test_super_ops = {
	.alloc_inode = btrfs_alloc_inode,
	.destroy_inode = btrfs_test_destroy_inode,
};

static int btrfs_test_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BTRFS_TEST_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &btrfs_test_super_ops;
	return 0;
}

static struct file_system_type test_type = {
	.name = "btrfs_test_fs",
	.init_fs_context = btrfs_test_init_fs_context,
	.kill_sb = kill_anon_super,
};

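/*
 * Allocate a new inode backed by the dummy test mount and initialize it as
 * a regular file.
 */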
struct inode *btrfs_new_test_inode(void)
{
	struct inode *inode;

	inode = new_inode(test_mnt->mnt_sb);
	if (inode)
		inode_init_owner(inode, NULL, S_IFREG);

	return inode;
}

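/*
 * Register and mount the in-memory pseudo filesystem that backs the dummy
 * inodes and superblock used by the self-tests.
 */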
static int btrfs_init_test_fs(void)
{
	int ret;

	ret = register_filesystem(&test_type);
	if (ret) {
		printk(KERN_ERR "btrfs: cannot register test file system\n");
		return ret;
	}

	test_mnt = kern_mount(&test_type);
	if (IS_ERR(test_mnt)) {
		printk(KERN_ERR "btrfs: cannot mount test file system\n");
		unregister_filesystem(&test_type);
		return PTR_ERR(test_mnt);
	}
	return 0;
}

static void btrfs_destroy_test_fs(void)
{
	kern_unmount(test_mnt);
	unregister_filesystem(&test_type);
}

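/*
 * Allocate a minimal fs_info for the self-tests: a dummy fs_devices and
 * super block copy plus the locks, radix trees and extent io trees the
 * tests touch.  The result is flagged with BTRFS_FS_STATE_DUMMY_FS_INFO so
 * it can be told apart from a real filesystem.
 */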
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
{
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
						GFP_KERNEL);

	if (!fs_info)
		return fs_info;
	fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices),
				      GFP_KERNEL);
	if (!fs_info->fs_devices) {
		kfree(fs_info);
		return NULL;
	}
	fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block),
				      GFP_KERNEL);
	if (!fs_info->super_copy) {
		kfree(fs_info->fs_devices);
		kfree(fs_info);
		return NULL;
	}

	fs_info->nodesize = nodesize;
	fs_info->sectorsize = sectorsize;

	if (init_srcu_struct(&fs_info->subvol_srcu)) {
		kfree(fs_info->fs_devices);
		kfree(fs_info->super_copy);
		kfree(fs_info);
		return NULL;
	}

	spin_lock_init(&fs_info->buffer_lock);
	spin_lock_init(&fs_info->qgroup_lock);
	spin_lock_init(&fs_info->super_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	mutex_init(&fs_info->qgroup_rescan_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	fs_info->running_transaction = NULL;
	fs_info->qgroup_tree = RB_ROOT;
	fs_info->qgroup_ulist = NULL;
	atomic64_set(&fs_info->tree_mod_seq, 0);
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	extent_io_tree_init(fs_info, &fs_info->freed_extents[0],
			    IO_TREE_FS_INFO_FREED_EXTENTS0, NULL);
	extent_io_tree_init(fs_info, &fs_info->freed_extents[1],
			    IO_TREE_FS_INFO_FREED_EXTENTS1, NULL);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

	test_mnt->mnt_sb->s_fs_info = fs_info;

	return fs_info;
}

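/*
 * Tear down a dummy fs_info: drop any extent buffers still sitting in the
 * buffer radix tree, then free the qgroup config, the fs roots and the
 * allocations made in btrfs_alloc_dummy_fs_info().
 */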
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
	struct radix_tree_iter iter;
	void **slot;

	if (!fs_info)
		return;

	if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
			      &fs_info->fs_state)))
		return;

	test_mnt->mnt_sb->s_fs_info = NULL;

	spin_lock(&fs_info->buffer_lock);
	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
		struct extent_buffer *eb;

		eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
		if (!eb)
			continue;
		/* Shouldn't happen but that kind of thinking creates CVEs */
		if (radix_tree_exception(eb)) {
			if (radix_tree_deref_retry(eb))
				slot = radix_tree_iter_retry(&iter);
			continue;
		}
		slot = radix_tree_iter_resume(slot, &iter);
		spin_unlock(&fs_info->buffer_lock);
		free_extent_buffer_stale(eb);
		spin_lock(&fs_info->buffer_lock);
	}
	spin_unlock(&fs_info->buffer_lock);

	btrfs_free_qgroup_config(fs_info);
	btrfs_free_fs_roots(fs_info);
	cleanup_srcu_struct(&fs_info->subvol_srcu);
	kfree(fs_info->super_copy);
	kfree(fs_info->fs_devices);
	kfree(fs_info);
}

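/*
 * Free a root allocated for a test.  Roots that made it into the fs roots
 * radix tree are left alone; btrfs_free_fs_roots() owns those.
 */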
void btrfs_free_dummy_root(struct btrfs_root *root)
{
	if (!root)
		return;
	/* Will be freed by btrfs_free_fs_roots */
	if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
		return;
	if (root->node) {
		/* One for allocate_extent_buffer */
		free_extent_buffer(root->node);
	}
	kfree(root);
}

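/*
 * Allocate a block group of the given length at logical offset 0, with just
 * enough state (key, free space ctl, list heads) for the free space tests.
 */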
struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
			      unsigned long length)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_KERNEL);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->key.objectid = 0;
	cache->key.offset = length;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->full_stripe_len = fs_info->sectorsize;
	cache->fs_info = fs_info;

	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	btrfs_init_free_space_ctl(cache);
	mutex_init(&cache->free_space_lock);

	return cache;
}

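/*
 * Free a dummy block group along with its free space cache and free space
 * control structure.
 */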
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
{
	if (!cache)
		return;
	__btrfs_remove_free_space_cache(cache->free_space_ctl);
	kfree(cache->free_space_ctl);
	kfree(cache);
}

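/*
 * Initialize a caller-provided transaction handle as a dummy transaction
 * (__TRANS_DUMMY, transid 1) so tests can pass it to code that expects a
 * running transaction.
 */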
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	memset(trans, 0, sizeof(*trans));
	trans->transid = 1;
	trans->type = __TRANS_DUMMY;
	trans->fs_info = fs_info;
}

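/*
 * Entry point for the built-in self-tests.  Mount the test filesystem, then
 * run every test suite for each supported sectorsize/nodesize combination
 * (currently only sectorsize == PAGE_SIZE, with nodesize ranging from
 * sectorsize up to BTRFS_MAX_METADATA_BLOCKSIZE).
 */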
int btrfs_run_sanity_tests(void)
{
	int ret, i;
	u32 sectorsize, nodesize;
	u32 test_sectorsize[] = {
		PAGE_SIZE,
	};
	ret = btrfs_init_test_fs();
	if (ret)
		return ret;
	for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
		sectorsize = test_sectorsize[i];
		for (nodesize = sectorsize;
		     nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
		     nodesize <<= 1) {
			pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
				sectorsize, nodesize);
			ret = btrfs_test_free_space_cache(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_extent_buffer_operations(sectorsize,
								  nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_extent_io(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_inodes(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_qgroups(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_free_space_tree(sectorsize, nodesize);
			if (ret)
				goto out;
		}
	}
	ret = btrfs_test_extent_map();

out:
	btrfs_destroy_test_fs();
	return ret;
}