// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"

/*
 * Set us up to scrub reference count btrees.
 */
int
xchk_setup_ag_refcountbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, false);
}

/* Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely.  The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in agbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks.  Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr.  Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list.  All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true.  If not, the refcount is incorrect.
 */
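/*
 * An illustrative walk-through of the algorithm above, with invented
 * numbers (not taken from any real filesystem): suppose the refcount
 * record is (bno == 10, len == 10, refcount == 3), and one rmap
 * [10, 20) covers the whole extent, so step 1a counts it ($seen == 1)
 * and $target_nr == 3 - 1 == 2.  The fragments, in agbno order, are
 * [8, 14), [10, 12), [12, 20) and [14, 20).  Step 2 pulls [8, 14) and
 * [10, 12) into the working set.  Step 3 removes the shortest fragment
 * [10, 12), which ends at block 12, and pulls in [12, 20), which
 * starts there; it then removes [8, 14) and pulls in [14, 20).  Step 4
 * finds both remaining fragments ending at block 20, the end of the
 * refcount extent, so every block in [10, 20) has exactly three owners
 * and the check passes.
 */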
struct xchk_refcnt_frag {
	struct list_head	list;
	struct xfs_rmap_irec	rm;
};

struct xchk_refcnt_check {
	struct xfs_scrub	*sc;
	struct list_head	fragments;

	/* refcount extent we're examining */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;

	/* number of owners seen */
	xfs_nlink_t		seen;
};

/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the refcountbt says we should have.
 */
STATIC int
xchk_refcountbt_rmap_check(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xchk_refcnt_check	*refchk = priv;
	struct xchk_refcnt_frag		*frag;
	xfs_agblock_t			rm_last;
	xfs_agblock_t			rc_last;
	int				error = 0;

	if (xchk_should_terminate(refchk->sc, &error))
		return error;

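	/* Last block (inclusive) of the rmap and of the refcount extent. */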
	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW staging extent. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing.  If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
				KM_MAYFAIL);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}

/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount.  If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 */
STATIC void
xchk_refcountbt_process_rmap_fragments(
	struct xchk_refcnt_check	*refchk)
{
	struct list_head		worklist;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	xfs_agblock_t			bno;
	xfs_agblock_t			rbno;
	xfs_agblock_t			next_rbno;
	xfs_nlink_t			nr;
	xfs_nlink_t			target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;

	/*
	 * There are (refchk->refcount - refchk->seen) references we haven't
	 * found yet.  Pull that many off the fragment list and figure out
	 * where the smallest rmap ends (and therefore the next rmap should
	 * start).  All the rmaps we pull off should start at or before the
	 * beginning of the refcount record's range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;
	nr = 1;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno)
			goto done;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		if (nr == target_nr)
			break;
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kmem_free(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the
		 * fragment list ran out of items.  Therefore, we cannot
		 * maintain the required refcount.  Something is wrong, so
		 * we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;
done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_refcountbt_xref_rmap(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	xfs_nlink_t			refcount)
{
	struct xchk_refcnt_check	refchk = {
		.sc = sc,
		.bno = bno,
		.len = len,
		.refcount = refcount,
		.seen = 0,
	};
	struct xfs_rmap_irec		low;
	struct xfs_rmap_irec		high;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xchk_refcountbt_rmap_check, &refchk);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xchk_refcountbt_process_rmap_fragments(&refchk);
	if (refcount != refchk.seen)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_refcountbt_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	xfs_nlink_t		refcount)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_xref_is_not_inode_chunk(sc, agbno, len);
	xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
}

/* Scrub a refcountbt record. */
STATIC int
xchk_refcountbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	xfs_agblock_t		*cow_blocks = bs->private;
	xfs_agnumber_t		agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;
	bool			has_cowflag;

	bno = be32_to_cpu(rec->refc.rc_startblock);
	len = be32_to_cpu(rec->refc.rc_blockcount);
	refcount = be32_to_cpu(rec->refc.rc_refcount);

	/* Only CoW records can have refcount == 1. */
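	/* (The CoW flag is XFS_REFC_COW_START, the high bit of the on-disk
	 * startblock value.) */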
	has_cowflag = (bno & XFS_REFC_COW_START);
	if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	if (has_cowflag)
		(*cow_blocks) += len;

	/* Check the extent. */
	bno &= ~XFS_REFC_COW_START;
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (refcount == 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_refcountbt_xref(bs->sc, bno, len, refcount);

	return 0;
}

/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
	struct xfs_scrub	*sc,
	xfs_filblks_t		cow_blocks)
{
	xfs_extlen_t		refcbt_blocks = 0;
	xfs_filblks_t		blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_REFC, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_COW, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the refcount btree for some AG. */
int
xchk_refcountbt(
	struct xfs_scrub	*sc)
{
	xfs_agblock_t		cow_blocks = 0;
	int			error;

	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
			&XFS_RMAP_OINFO_REFC, &cow_blocks);
	if (error)
		return error;

	xchk_refcount_xref_rmap(sc, cow_blocks);

	return 0;
}

/* xref check that a cow staging extent is marked in the refcountbt. */
void
xchk_xref_is_cow_staging(
	struct xfs_scrub		*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_refcount_irec	rc;
	bool				has_cowflag;
	int				has_refcount;
	int				error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	/* Find the CoW staging extent. */
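	/*
	 * Adding XFS_REFC_COW_START to agbno sets the CoW flag bit, which
	 * steers the lookup into the CoW-staging portion of the refcountbt
	 * keyspace.
	 */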
	error = xfs_refcount_lookup_le(sc->sa.refc_cur,
			agbno + XFS_REFC_COW_START, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	/* CoW flag must be set, refcount must be 1. */
	has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
	if (!has_cowflag || rc.rc_refcount != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);

	/* Must be at least as long as what was passed in. */
	if (rc.rc_blockcount < len)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/*
 * xref check that the extent is not shared.  Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_shared(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	bool			shared;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (shared)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}