// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/dabtree.h"
#include "scrub/attr.h"

/*
 * Allocate enough memory to hold an attr value and attr block bitmaps,
 * reallocating the buffer if necessary.  Buffer contents are not preserved
 * across a reallocation.
 */
int
xchk_setup_xattr_buf(
        struct xfs_scrub *sc,
        size_t value_size,
        xfs_km_flags_t flags)
{
        size_t sz;
        struct xchk_xattr_buf *ab = sc->buf;

        /*
         * We need enough space to read an xattr value from the file or enough
         * space to hold three copies of the xattr free space bitmap (one each
         * for the used-space, freemap, and scratch maps used by the leaf
         * block checks).  We don't need the buffer space for both purposes
         * at the same time.
         */
        sz = 3 * sizeof(long) * BITS_TO_LONGS(sc->mp->m_attr_geo->blksize);
        sz = max_t(size_t, sz, value_size);

        /*
         * If there's already a buffer, figure out if we need to reallocate it
         * to accommodate a larger size.
         */
        if (ab) {
                if (sz <= ab->sz)
                        return 0;
                kmem_free(ab);
                sc->buf = NULL;
        }

        /*
         * Don't zero the buffer upon allocation to avoid runtime overhead.
         * All users must be careful never to read uninitialized contents.
         */
        ab = kmem_alloc_large(sizeof(*ab) + sz, flags);
        if (!ab)
                return -ENOMEM;

        ab->sz = sz;
        sc->buf = ab;
        return 0;
}

/* Set us up to scrub an inode's extended attributes. */
int
xchk_setup_xattr(
        struct xfs_scrub *sc,
        struct xfs_inode *ip)
{
        int error;

        /*
         * We failed to get memory while checking attrs, so this time try to
         * get all the memory we're ever going to need.  Allocate the buffer
         * without the inode lock held, which means we can sleep.
         */
        if (sc->flags & XCHK_TRY_HARDER) {
                error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0);
                if (error)
                        return error;
        }

        return xchk_setup_inode_contents(sc, ip, 0);
}

/* Extended Attributes */

struct xchk_xattr {
        struct xfs_attr_list_context context;
        struct xfs_scrub *sc;
};

/*
 * Check that an extended attribute key can be looked up by hash.
 *
 * We use the XFS attribute list iterator (i.e. xfs_attr_list_int_ilocked)
 * to call this function for every attribute key in an inode.  Once
 * we're here, we load the attribute value to see if any errors happen,
 * or if we get more or less data than we expected.
 */
static void
xchk_xattr_listent(
        struct xfs_attr_list_context *context,
        int flags,
        unsigned char *name,
        int namelen,
        int valuelen)
{
        struct xchk_xattr *sx;
        struct xfs_da_args args = { NULL };
        int error = 0;

        sx = container_of(context, struct xchk_xattr, context);

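        /*
         * A negative value stashed in seen_enough below stops the iteration
         * and is picked up as the error code by xchk_xattr() once
         * xfs_attr_list_int_ilocked returns.
         */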
        if (xchk_should_terminate(sx->sc, &error)) {
                context->seen_enough = error;
                return;
        }

        if (flags & XFS_ATTR_INCOMPLETE) {
                /* Incomplete attr key, just mark the inode for preening. */
                xchk_ino_set_preen(sx->sc, context->dp->i_ino);
                return;
        }

        /* Does this name make sense? */
        if (!xfs_attr_namecheck(name, namelen)) {
                xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
                return;
        }

        /*
         * Try to allocate enough memory to extract the attr value.  If that
         * doesn't work, we overload the seen_enough variable to convey
         * the error code back to the main scrub function.
         */
        error = xchk_setup_xattr_buf(sx->sc, valuelen, KM_MAYFAIL);
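        /*
         * Convert -ENOMEM into -EDEADLOCK; this presumably lets the scrub
         * harness retry the operation with XCHK_TRY_HARDER set, at which
         * point xchk_setup_xattr above preallocates the largest buffer we
         * could ever need.
         */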
        if (error == -ENOMEM)
                error = -EDEADLOCK;
        if (error) {
                context->seen_enough = error;
                return;
        }

        args.flags = ATTR_KERNOTIME;
        if (flags & XFS_ATTR_ROOT)
                args.flags |= ATTR_ROOT;
        else if (flags & XFS_ATTR_SECURE)
                args.flags |= ATTR_SECURE;
        args.geo = context->dp->i_mount->m_attr_geo;
        args.whichfork = XFS_ATTR_FORK;
        args.dp = context->dp;
        args.name = name;
        args.namelen = namelen;
        args.hashval = xfs_da_hashname(args.name, args.namelen);
        args.trans = context->tp;
        args.value = xchk_xattr_valuebuf(sx->sc);
        args.valuelen = valuelen;

        error = xfs_attr_get_ilocked(context->dp, &args);
        if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
                        &error))
                goto fail_xref;
        if (args.valuelen != valuelen)
                xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
                                args.blkno);
fail_xref:
        if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                context->seen_enough = 1;
        return;
}

/*
 * Mark a range [start, start+len) in this map.  Returns true if the
 * region was free, and false if there's a conflict or a problem.
 *
 * Within a char, the lowest bit of the char represents the byte with
 * the smallest address.
 */
STATIC bool
xchk_xattr_set_map(
        struct xfs_scrub *sc,
        unsigned long *map,
        unsigned int start,
        unsigned int len)
{
        unsigned int mapsize = sc->mp->m_attr_geo->blksize;
        bool ret = true;

        if (start >= mapsize)
                return false;
        if (start + len > mapsize) {
                len = mapsize - start;
                ret = false;
        }

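        /*
         * Any bit already set within [start, start+len) means the new range
         * overlaps something we've marked previously.
         */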
        if (find_next_bit(map, mapsize, start) < start + len)
                ret = false;
        bitmap_set(map, start, len);

        return ret;
}

/*
 * Check the leaf freemap against the usage bitmap.  Returns false if the
 * attr freemap has problems or points to used space.
 */
STATIC bool
xchk_xattr_check_freemap(
        struct xfs_scrub *sc,
        unsigned long *map,
        struct xfs_attr3_icleaf_hdr *leafhdr)
{
        unsigned long *freemap = xchk_xattr_freemap(sc);
        unsigned long *dstmap = xchk_xattr_dstmap(sc);
        unsigned int mapsize = sc->mp->m_attr_geo->blksize;
        int i;

        /* Construct bitmap of freemap contents. */
        bitmap_zero(freemap, mapsize);
        for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
                if (!xchk_xattr_set_map(sc, freemap,
                                leafhdr->freemap[i].base,
                                leafhdr->freemap[i].size))
                        return false;
        }

        /* Look for bits that are set in freemap and are marked in use. */
        return bitmap_and(dstmap, freemap, map, mapsize) == 0;
}

/*
 * Check this leaf entry's relations to everything else.
 * Adds the number of bytes used for the name/value data to *usedbytes.
 */
STATIC void
xchk_xattr_entry(
        struct xchk_da_btree *ds,
        int level,
        char *buf_end,
        struct xfs_attr_leafblock *leaf,
        struct xfs_attr3_icleaf_hdr *leafhdr,
        struct xfs_attr_leaf_entry *ent,
        int idx,
        unsigned int *usedbytes,
        __u32 *last_hashval)
{
        struct xfs_mount *mp = ds->state->mp;
        unsigned long *usedmap = xchk_xattr_usedmap(ds->sc);
        char *name_end;
        struct xfs_attr_leaf_name_local *lentry;
        struct xfs_attr_leaf_name_remote *rentry;
        unsigned int nameidx;
        unsigned int namesize;

        if (ent->pad2 != 0)
                xchk_da_set_corrupt(ds, level);

        /* Hash values in order? */
        if (be32_to_cpu(ent->hashval) < *last_hashval)
                xchk_da_set_corrupt(ds, level);
        *last_hashval = be32_to_cpu(ent->hashval);

        nameidx = be16_to_cpu(ent->nameidx);
        if (nameidx < leafhdr->firstused ||
            nameidx >= mp->m_attr_geo->blksize) {
                xchk_da_set_corrupt(ds, level);
                return;
        }

        /* Check the name information. */
        if (ent->flags & XFS_ATTR_LOCAL) {
                lentry = xfs_attr3_leaf_name_local(leaf, idx);
                namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
                                be16_to_cpu(lentry->valuelen));
                name_end = (char *)lentry + namesize;
                if (lentry->namelen == 0)
                        xchk_da_set_corrupt(ds, level);
        } else {
                rentry = xfs_attr3_leaf_name_remote(leaf, idx);
                namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
                name_end = (char *)rentry + namesize;
                if (rentry->namelen == 0 || rentry->valueblk == 0)
                        xchk_da_set_corrupt(ds, level);
        }
        if (name_end > buf_end)
                xchk_da_set_corrupt(ds, level);

        if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
                xchk_da_set_corrupt(ds, level);
        if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                *usedbytes += namesize;
}

/* Scrub an attribute leaf. */
STATIC int
xchk_xattr_block(
        struct xchk_da_btree *ds,
        int level)
{
        struct xfs_attr3_icleaf_hdr leafhdr;
        struct xfs_mount *mp = ds->state->mp;
        struct xfs_da_state_blk *blk = &ds->state->path.blk[level];
        struct xfs_buf *bp = blk->bp;
        xfs_dablk_t *last_checked = ds->private;
        struct xfs_attr_leafblock *leaf = bp->b_addr;
        struct xfs_attr_leaf_entry *ent;
        struct xfs_attr_leaf_entry *entries;
        unsigned long *usedmap;
        char *buf_end;
        size_t off;
        __u32 last_hashval = 0;
        unsigned int usedbytes = 0;
        unsigned int hdrsize;
        int i;
        int error;

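        /*
         * xchk_xattr_rec calls us once per leaf entry, so skip the block
         * checks if we've already seen this block.
         */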
        if (*last_checked == blk->blkno)
                return 0;

        /* Allocate memory for block usage checking. */
        error = xchk_setup_xattr_buf(ds->sc, 0, KM_MAYFAIL);
        if (error == -ENOMEM)
                return -EDEADLOCK;
        if (error)
                return error;
        usedmap = xchk_xattr_usedmap(ds->sc);

        *last_checked = blk->blkno;
        bitmap_zero(usedmap, mp->m_attr_geo->blksize);

        /* Check all the padding. */
        if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb)) {
                struct xfs_attr3_leafblock *leaf = bp->b_addr;

                if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
                    leaf->hdr.info.hdr.pad != 0)
                        xchk_da_set_corrupt(ds, level);
        } else {
                if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
                        xchk_da_set_corrupt(ds, level);
        }

        /* Check the leaf header */
        xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
        hdrsize = xfs_attr3_leaf_hdr_size(leaf);

        if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
                xchk_da_set_corrupt(ds, level);
        if (leafhdr.firstused > mp->m_attr_geo->blksize)
                xchk_da_set_corrupt(ds, level);
        if (leafhdr.firstused < hdrsize)
                xchk_da_set_corrupt(ds, level);
        if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
                xchk_da_set_corrupt(ds, level);

        if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        entries = xfs_attr3_leaf_entryp(leaf);
        if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
                xchk_da_set_corrupt(ds, level);

        buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
        for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
                /* Mark the leaf entry itself. */
                off = (char *)ent - (char *)leaf;
                if (!xchk_xattr_set_map(ds->sc, usedmap, off,
                                sizeof(xfs_attr_leaf_entry_t))) {
                        xchk_da_set_corrupt(ds, level);
                        goto out;
                }

                /* Check the entry and nameval. */
                xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
                                ent, i, &usedbytes, &last_hashval);

                if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        goto out;
        }

        if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
                xchk_da_set_corrupt(ds, level);

        if (leafhdr.usedbytes != usedbytes)
                xchk_da_set_corrupt(ds, level);

out:
        return 0;
}

/* Scrub an attribute btree record. */
STATIC int
xchk_xattr_rec(
        struct xchk_da_btree *ds,
        int level,
        void *rec)
{
        struct xfs_mount *mp = ds->state->mp;
        struct xfs_attr_leaf_entry *ent = rec;
        struct xfs_da_state_blk *blk;
        struct xfs_attr_leaf_name_local *lentry;
        struct xfs_attr_leaf_name_remote *rentry;
        struct xfs_buf *bp;
        xfs_dahash_t calc_hash;
        xfs_dahash_t hash;
        int nameidx;
        int hdrsize;
        unsigned int badflags;
        int error;

        blk = &ds->state->path.blk[level];

        /* Check the whole block, if necessary. */
        error = xchk_xattr_block(ds, level);
        if (error)
                goto out;
        if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        /* Check the hash of the entry. */
        error = xchk_da_btree_hash(ds, level, &ent->hashval);
        if (error)
                goto out;

        /* Find the attr entry's location. */
        bp = blk->bp;
        hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
        nameidx = be16_to_cpu(ent->nameidx);
        if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
                xchk_da_set_corrupt(ds, level);
                goto out;
        }

        /* Retrieve the entry and check it. */
        hash = be32_to_cpu(ent->hashval);
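        /* Only these flag bits may be set in an on-disk attr entry. */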
        badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
                        XFS_ATTR_INCOMPLETE);
        if ((ent->flags & badflags) != 0)
                xchk_da_set_corrupt(ds, level);
        if (ent->flags & XFS_ATTR_LOCAL) {
                lentry = (struct xfs_attr_leaf_name_local *)
                                (((char *)bp->b_addr) + nameidx);
                if (lentry->namelen <= 0) {
                        xchk_da_set_corrupt(ds, level);
                        goto out;
                }
                calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
        } else {
                rentry = (struct xfs_attr_leaf_name_remote *)
                                (((char *)bp->b_addr) + nameidx);
                if (rentry->namelen <= 0) {
                        xchk_da_set_corrupt(ds, level);
                        goto out;
                }
                calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
        }
        if (calc_hash != hash)
                xchk_da_set_corrupt(ds, level);

out:
        return error;
}

/* Scrub the extended attribute metadata. */
int
xchk_xattr(
        struct xfs_scrub *sc)
{
        struct xchk_xattr sx;
        struct attrlist_cursor_kern cursor = { 0 };
        xfs_dablk_t last_checked = -1U;
        int error = 0;

        if (!xfs_inode_hasattr(sc->ip))
                return -ENOENT;

        memset(&sx, 0, sizeof(sx));
        /* Check attribute tree structure */
        error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
                        &last_checked);
        if (error)
                goto out;

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        /* Check that every attr key can also be looked up by hash. */
        sx.context.dp = sc->ip;
        sx.context.cursor = &cursor;
        sx.context.resynch = 1;
        sx.context.put_listent = xchk_xattr_listent;
        sx.context.tp = sc->tp;
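        /*
         * ATTR_INCOMPLETE asks the iterator to hand us incomplete entries
         * too, so that xchk_xattr_listent can mark the inode for preening.
         */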
        sx.context.flags = ATTR_INCOMPLETE;
        sx.sc = sc;

        /*
         * Look up every xattr in this file by name.
         *
         * Use the backend implementation of xfs_attr_list to call
         * xchk_xattr_listent on every attribute key in this inode.
         * In other words, we use the same iterator/callback mechanism
         * that listattr uses to scrub extended attributes, though in our
         * _listent function, we check the value of the attribute.
         *
         * The VFS only locks i_rwsem when modifying attrs, so keep all
         * three locks held because that's the only way to ensure we're
         * the only thread poking into the da btree.  We traverse the da
         * btree while holding a leaf buffer locked for the xattr name
         * iteration, which doesn't really follow the usual buffer
         * locking order.
         */
        error = xfs_attr_list_int_ilocked(&sx.context);
        if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
                goto out;

        /* Did our listent function try to return any errors? */
        if (sx.context.seen_enough < 0)
                error = sx.context.seen_enough;
out:
        return error;
}