/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"

#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2
#define BTRFS_WRITE_LOCK_BLOCKING 3
#define BTRFS_READ_LOCK_BLOCKING 4

/*
 * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at
 * the time of this patch is 8, which is how many we use. Keep this in mind if
 * you decide you want to add another subclass.
 */
enum btrfs_lock_nesting {
	BTRFS_NESTING_NORMAL,

	/*
	 * When we COW a block we are holding the lock on the original block,
	 * and since our lockdep maps are rootid+level, this confuses lockdep
	 * when we lock the newly allocated COW'd block. Handle this by having
	 * a subclass for COW'ed blocks so that lockdep doesn't complain.
	 */
	BTRFS_NESTING_COW,

	/*
	 * Oftentimes we need to lock adjacent nodes on the same level while
	 * still holding the lock on the original node we searched to, such as
	 * for searching forward or for split/balance.
	 *
	 * Because of this we need to indicate to lockdep that this is
	 * acceptable by having a different subclass for each of these
	 * operations.
	 */
	BTRFS_NESTING_LEFT,
	BTRFS_NESTING_RIGHT,

	/*
	 * When splitting we will be holding a lock on the left/right node when
	 * we need to cow that node, thus we need a new set of subclasses for
	 * these two operations.
	 */
	BTRFS_NESTING_LEFT_COW,
	BTRFS_NESTING_RIGHT_COW,

	/*
	 * When splitting we may push nodes to the left or right, but still use
	 * the subsequent nodes in our path, keeping our locks on those adjacent
	 * blocks. Thus when we go to allocate a new split block we've already
	 * used up all of our available subclasses, so this subclass exists to
	 * handle this case where we need to allocate a new split block.
	 */
	BTRFS_NESTING_SPLIT,

	/*
	 * When promoting a new block to a root we need to have a special
	 * subclass so we don't confuse lockdep, as it will appear that we are
	 * locking a higher level node before a lower level one. Copying also
	 * has this problem as it appears we're locking the same block again
	 * when we make a snapshot of an existing root.
	 */
	BTRFS_NESTING_NEW_ROOT,

	/*
	 * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
	 * add this in here and add a static_assert to keep us from going over
	 * the limit. As of this writing we're limited to 8, and we're
	 * definitely using 8, hence this check to keep us from messing up in
	 * the future.
	 */
	BTRFS_NESTING_MAX,
};

static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
	      "too many lock subclasses defined");
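
/*
 * A minimal sketch of how the subclasses above are meant to be used: a
 * caller that already holds a tree lock takes the lock on a sibling or a
 * freshly COW'd block at the same rootid+level under a different nesting
 * class, so lockdep can tell the two acquisitions apart. The buffer names
 * below are hypothetical and error handling is omitted:
 *
 *	btrfs_tree_lock(eb);
 *	__btrfs_tree_lock(cow_eb, BTRFS_NESTING_COW);
 *	...
 *	btrfs_tree_unlock(cow_eb);
 *	btrfs_tree_unlock(eb);
 */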

struct btrfs_path;

void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);

void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
			    bool recurse);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
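
/*
 * A minimal usage sketch, with hypothetical surroundings: the try-lock
 * variants return nonzero on success and never sleep, so callers that must
 * not block can back off and retry instead of waiting:
 *
 *	if (btrfs_try_tree_write_lock(eb)) {
 *		... modify eb ...
 *		btrfs_tree_unlock(eb);
 *	} else {
 *		... contended, drop other locks and retry ...
 *	}
 */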
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
						  bool recurse);

static inline struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	return __btrfs_read_lock_root_node(root, false);
}
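
/*
 * A minimal sketch of the usual top-of-tree pattern, with reference
 * handling simplified: the helpers above return the root node with its
 * lock held and a reference taken, so the caller unlocks and then drops
 * the reference:
 *
 *	struct extent_buffer *b = btrfs_read_lock_root_node(root);
 *	... search b, descend to children ...
 *	btrfs_tree_read_unlock(b);
 *	free_extent_buffer(b);
 */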

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!eb->write_locks);
}
#else
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
#endif

void btrfs_set_path_blocking(struct btrfs_path *p);
void btrfs_unlock_up_safe(struct btrfs_path *path, int level);

static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)
		btrfs_tree_unlock(eb);
	else if (rw == BTRFS_READ_LOCK_BLOCKING)
		btrfs_tree_read_unlock_blocking(eb);
	else if (rw == BTRFS_READ_LOCK)
		btrfs_tree_read_unlock(eb);
	else
		BUG();
}
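
/*
 * A hypothetical caller of btrfs_tree_unlock_rw(): code that records which
 * BTRFS_*_LOCK value it took (as btrfs_path::locks[] does per level) can
 * unlock uniformly without re-deriving the lock type:
 *
 *	int rw = path->locks[level];
 *
 *	if (rw)
 *		btrfs_tree_unlock_rw(path->nodes[level], rw);
 */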

struct btrfs_drew_lock {
	atomic_t readers;
	struct percpu_counter writers;
	wait_queue_head_t pending_writers;
	wait_queue_head_t pending_readers;
};

int btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);
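
/*
 * A minimal usage sketch of the drew lock (as many concurrent readers as
 * wanted, or as many concurrent writers as wanted, but never both at
 * once). Initialization can fail because the writers side is a percpu
 * counter; the surrounding code here is hypothetical:
 *
 *	struct btrfs_drew_lock lock;
 *	int ret;
 *
 *	ret = btrfs_drew_lock_init(&lock);
 *	if (ret)
 *		return ret;
 *
 *	btrfs_drew_read_lock(&lock);
 *	... reader-side work, excludes all writers ...
 *	btrfs_drew_read_unlock(&lock);
 *
 *	if (btrfs_drew_try_write_lock(&lock)) {
 *		... writer-side work, excludes all readers ...
 *		btrfs_drew_write_unlock(&lock);
 *	}
 *
 *	btrfs_drew_lock_destroy(&lock);
 */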

#endif /* BTRFS_LOCKING_H */