// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"

/*
 * Set us up to scrub reverse mapping btrees.
 */
int
xchk_setup_ag_rmapbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, false);
}

/* Reverse-mapping scrubber. */

/* Cross-reference a rmap against the refcount btree. */
STATIC void
xchk_rmapbt_xref_refc(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	bool			non_inode;
	bool			is_bmbt;
	bool			is_attr;
	bool			is_unwritten;
	int			error;

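	/*
	 * Nothing to do if there is no refcount cursor or if we should not
	 * cross-reference this record.
	 */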
	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
	is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;

	/* If this is shared, must be a data fork extent. */
	error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
			irec->rm_blockcount, &fbno, &flen, false);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_rmapbt_xref(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		agbno = irec->rm_startblock;
	xfs_extlen_t		len = irec->rm_blockcount;

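	/* Don't bother cross-referencing if corruption was already found. */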
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_INODES)
		xchk_xref_is_inode_chunk(sc, agbno, len);
	else
		xchk_xref_is_not_inode_chunk(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_COW)
		xchk_xref_is_cow_staging(sc, irec->rm_startblock,
				irec->rm_blockcount);
	else
		xchk_rmapbt_xref_refc(sc, irec);
}

/* Scrub an rmapbt record. */
STATIC int
xchk_rmapbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	struct xfs_rmap_irec	irec;
	xfs_agnumber_t		agno = bs->cur->bc_private.a.agno;
	bool			non_inode;
	bool			is_unwritten;
	bool			is_bmbt;
	bool			is_attr;
	int			error;

	error = xfs_rmap_btrec_to_irec(rec, &irec);
	if (!xchk_btree_process_error(bs->sc, bs->cur, 0, &error))
		goto out;

	/*
	 * Check the extent: a record with zero blockcount or whose end
	 * overflows the block number space is corrupt.
	 */
	if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (irec.rm_owner == XFS_RMAP_OWN_FS) {
		/*
		 * xfs_verify_agbno returns false for static fs metadata.
		 * Since that only exists at the start of the AG, validate
		 * that by hand.
		 */
		if (irec.rm_startblock != 0 ||
		    irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	} else {
		/*
		 * Otherwise we must point somewhere past the static metadata
		 * but before the end of the FS. Run the regular check.
		 */
		if (!xfs_verify_agbno(mp, agno, irec.rm_startblock) ||
		    !xfs_verify_agbno(mp, agno, irec.rm_startblock +
				irec.rm_blockcount - 1))
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	}

	/* Check flags. */
	non_inode = XFS_RMAP_NON_INODE_OWNER(irec.rm_owner);
	is_bmbt = irec.rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec.rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN;

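	/*
	 * Neither bmbt blocks nor extents with a non-inode owner map to a
	 * file offset, and the unwritten flag only makes sense for data
	 * fork file extents.
	 */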
	if (is_bmbt && irec.rm_offset != 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (non_inode && irec.rm_offset != 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (is_unwritten && (is_bmbt || non_inode || is_attr))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (non_inode && (is_bmbt || is_unwritten || is_attr))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (!non_inode) {
		if (!xfs_verify_ino(mp, irec.rm_owner))
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	} else {
		/* Non-inode owner within the magic values? */
		if (irec.rm_owner <= XFS_RMAP_OWN_MIN ||
		    irec.rm_owner > XFS_RMAP_OWN_FS)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	}

	xchk_rmapbt_xref(bs->sc, &irec);
out:
	return error;
}

/* Scrub the rmap btree for some AG. */
int
xchk_rmapbt(
	struct xfs_scrub	*sc)
{
	return xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
			&XFS_RMAP_OINFO_AG, NULL);
}

/* xref check that the extent is or is not owned by a given owner */
static inline void
xchk_xref_check_owner(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	const struct xfs_owner_info	*oinfo,
	bool			should_have_rmap)
{
	bool			has_rmap;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
			&has_rmap);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (has_rmap != should_have_rmap)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent is owned by a given owner */
void
xchk_xref_is_owned_by(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	const struct xfs_owner_info	*oinfo)
{
	xchk_xref_check_owner(sc, bno, len, oinfo, true);
}

/* xref check that the extent is not owned by a given owner */
void
xchk_xref_is_not_owned_by(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	const struct xfs_owner_info	*oinfo)
{
	xchk_xref_check_owner(sc, bno, len, oinfo, false);
}

/* xref check that the extent has no reverse mapping at all */
void
xchk_xref_has_no_owner(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	bool			has_rmap;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (has_rmap)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}