/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/log2.h>

#define DM_MSG_PREFIX "striped"
#define DM_IO_ERROR_THRESHOLD 15

/* One member device of the stripe set. */
struct stripe {
	struct dm_dev *dev;
	sector_t physical_start;

	atomic_t error_count;
};

/* Private context for one striped target. */
struct stripe_c {
	uint32_t stripes;
	int stripes_shift;

	/* The size of this target / num. stripes */
	sector_t stripe_width;

	uint32_t chunk_size;
	int chunk_size_shift;

	/* Needed for handling events */
	struct dm_target *ti;

	/* Work struct used for triggering events */
	struct work_struct trigger_event;

	/* Variable-length array, one entry per stripe. */
	struct stripe stripe[0];
};

/*
 * An event is triggered whenever a drive
 * drops out of a stripe volume.
 */
static void trigger_event(struct work_struct *work)
{
	struct stripe_c *sc = container_of(work, struct stripe_c,
					   trigger_event);
	dm_table_event(sc->ti->table);
}

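/*
 * Allocate the context and its trailing stripe[] array in a single
 * kmalloc(); dm_array_too_big() rejects stripe counts whose combined
 * allocation size would overflow.
 */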
static inline struct stripe_c *alloc_context(unsigned int stripes)
{
	size_t len;

	if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
			     stripes))
		return NULL;

	len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);

	return kmalloc(len, GFP_KERNEL);
}

/*
 * Parse a single <dev> <sector> pair
 */
static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
		      unsigned int stripe, char **argv)
{
	unsigned long long start;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
		return -EINVAL;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &sc->stripe[stripe].dev);
	if (ret)
		return ret;

	sc->stripe[stripe].physical_start = start;

	return 0;
}

/*
 * Construct a striped mapping.
 * <number of stripes> <chunk size> [<dev_path> <offset>]+
 */
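/*
 * Example table line (hypothetical device names; values in 512-byte
 * sectors):
 *
 *	0 2097152 striped 2 256 /dev/sda1 0 /dev/sdb1 0
 *
 * would stripe a 1 GiB target across two devices in 128 KiB chunks.
 */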
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct stripe_c *sc;
	sector_t width, tmp_len;
	uint32_t stripes;
	uint32_t chunk_size;
	int r;
	unsigned int i;

	if (argc < 2) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	if (kstrtouint(argv[0], 10, &stripes) || !stripes) {
		ti->error = "Invalid stripe count";
		return -EINVAL;
	}

	if (kstrtouint(argv[1], 10, &chunk_size) || !chunk_size) {
		ti->error = "Invalid chunk_size";
		return -EINVAL;
	}

	width = ti->len;
	if (sector_div(width, stripes)) {
		ti->error = "Target length not divisible by number of stripes";
		return -EINVAL;
	}

	tmp_len = width;
	if (sector_div(tmp_len, chunk_size)) {
		ti->error = "Target length not divisible by chunk size";
		return -EINVAL;
	}

	/*
	 * Do we have enough arguments for that many stripes?
	 */
	if (argc != (2 + 2 * stripes)) {
		ti->error = "Not enough destinations specified";
		return -EINVAL;
	}

	sc = alloc_context(stripes);
	if (!sc) {
		ti->error = "Memory allocation for striped context failed";
		return -ENOMEM;
	}

	INIT_WORK(&sc->trigger_event, trigger_event);

	/* Set pointer to dm target; used in trigger_event */
	sc->ti = ti;
	sc->stripes = stripes;
	sc->stripe_width = width;

	if (stripes & (stripes - 1))
		sc->stripes_shift = -1;
	else
		sc->stripes_shift = __ffs(stripes);

	r = dm_set_target_max_io_len(ti, chunk_size);
	if (r) {
		kfree(sc);
		return r;
	}

	ti->num_flush_bios = stripes;
	ti->num_discard_bios = stripes;
	ti->num_secure_erase_bios = stripes;
	ti->num_write_same_bios = stripes;
	ti->num_write_zeroes_bios = stripes;

	sc->chunk_size = chunk_size;
	if (chunk_size & (chunk_size - 1))
		sc->chunk_size_shift = -1;
	else
		sc->chunk_size_shift = __ffs(chunk_size);

	/*
	 * Get the stripe destinations.
	 */
	for (i = 0; i < stripes; i++) {
		argv += 2;

		r = get_stripe(ti, sc, i, argv);
		if (r < 0) {
			ti->error = "Couldn't parse stripe destination";
			while (i--)
				dm_put_device(ti, sc->stripe[i].dev);
			kfree(sc);
			return r;
		}
		atomic_set(&(sc->stripe[i].error_count), 0);
	}

	ti->private = sc;

	return 0;
}

static void stripe_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct stripe_c *sc = (struct stripe_c *) ti->private;

	for (i = 0; i < sc->stripes; i++)
		dm_put_device(ti, sc->stripe[i].dev);

	flush_work(&sc->trigger_event);
	kfree(sc);
}

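/*
 * Map a target-relative sector to a (stripe, sector-on-that-device)
 * pair. Logical chunks are dealt out round-robin across the stripes;
 * power-of-2 chunk sizes and stripe counts take the shift/mask fast
 * path instead of 64-bit division.
 *
 * Worked example (3 stripes, chunk_size 8): sector 50 sits at offset
 * 2 in logical chunk 6; 6 % 3 selects stripe 0 and 6 / 3 = 2 is the
 * chunk index on that device, so *result = 2 * 8 + 2 = 18.
 */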
static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
			      uint32_t *stripe, sector_t *result)
{
	sector_t chunk = dm_target_offset(sc->ti, sector);
	sector_t chunk_offset;

	if (sc->chunk_size_shift < 0)
		chunk_offset = sector_div(chunk, sc->chunk_size);
	else {
		chunk_offset = chunk & (sc->chunk_size - 1);
		chunk >>= sc->chunk_size_shift;
	}

	if (sc->stripes_shift < 0)
		*stripe = sector_div(chunk, sc->stripes);
	else {
		*stripe = chunk & (sc->stripes - 1);
		chunk >>= sc->stripes_shift;
	}

	if (sc->chunk_size_shift < 0)
		chunk *= sc->chunk_size;
	else
		chunk <<= sc->chunk_size_shift;

	*result = chunk + chunk_offset;
}

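/*
 * Variant of stripe_map_sector() used to clip a multi-chunk range to
 * one stripe: when @sector does not itself belong to @target_stripe,
 * round the mapped sector down to a chunk boundary, then advance one
 * chunk when the boundary chunk belongs to a stripe beyond
 * @target_stripe, so the caller obtains begin/end sectors covering
 * exactly the chunks owned by @target_stripe.
 */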
static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
				    uint32_t target_stripe, sector_t *result)
{
	uint32_t stripe;

	stripe_map_sector(sc, sector, &stripe, result);
	if (stripe == target_stripe)
		return;

	/* round down */
	sector = *result;
	if (sc->chunk_size_shift < 0)
		*result -= sector_div(sector, sc->chunk_size);
	else
		*result = sector & ~(sector_t)(sc->chunk_size - 1);

	if (target_stripe < stripe)
		*result += sc->chunk_size;	/* next chunk */
}

static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
			    uint32_t target_stripe)
{
	sector_t begin, end;

	stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
				target_stripe, &begin);
	stripe_map_range_sector(sc, bio_end_sector(bio),
				target_stripe, &end);
	if (begin < end) {
		bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
		bio->bi_iter.bi_sector = begin +
			sc->stripe[target_stripe].physical_start;
		bio->bi_iter.bi_size = to_bytes(end - begin);
		return DM_MAPIO_REMAPPED;
	} else {
		/* The range doesn't map to the target stripe */
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}
}

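/*
 * Map one bio. Flush bios were already cloned once per stripe by the
 * core (ti->num_flush_bios set in the constructor), so each clone is
 * simply redirected to its stripe's device. Discard, secure-erase,
 * write-same and write-zeroes bios are likewise cloned per stripe and
 * clipped to that stripe's chunks. Ordinary I/O is remapped in place.
 */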
static int stripe_map(struct dm_target *ti, struct bio *bio)
{
	struct stripe_c *sc = ti->private;
	uint32_t stripe;
	unsigned target_bio_nr;

	if (bio->bi_opf & REQ_PREFLUSH) {
		target_bio_nr = dm_bio_get_target_bio_nr(bio);
		BUG_ON(target_bio_nr >= sc->stripes);
		bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
		return DM_MAPIO_REMAPPED;
	}
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
	    unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||
	    unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
	    unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
		target_bio_nr = dm_bio_get_target_bio_nr(bio);
		BUG_ON(target_bio_nr >= sc->stripes);
		return stripe_map_range(sc, bio, target_bio_nr);
	}

	stripe_map_sector(sc, bio->bi_iter.bi_sector,
			  &stripe, &bio->bi_iter.bi_sector);

	bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
	bio_set_dev(bio, sc->stripe[stripe].dev->bdev);

	return DM_MAPIO_REMAPPED;
}

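/*
 * DAX support: each op maps the target-relative page offset to a
 * sector on one stripe device via stripe_map_sector(), converts it
 * back to a page offset on that device with bdev_dax_pgoff(), and
 * forwards the request to the underlying dax_device.
 */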
#if IS_ENABLED(CONFIG_DAX_DRIVER)
static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	struct stripe_c *sc = ti->private;
	struct dax_device *dax_dev;
	struct block_device *bdev;
	uint32_t stripe;
	long ret;

	stripe_map_sector(sc, sector, &stripe, &dev_sector);
	dev_sector += sc->stripe[stripe].physical_start;
	dax_dev = sc->stripe[stripe].dev->dax_dev;
	bdev = sc->stripe[stripe].dev->bdev;

	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}

static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	struct stripe_c *sc = ti->private;
	struct dax_device *dax_dev;
	struct block_device *bdev;
	uint32_t stripe;

	stripe_map_sector(sc, sector, &stripe, &dev_sector);
	dev_sector += sc->stripe[stripe].physical_start;
	dax_dev = sc->stripe[stripe].dev->dax_dev;
	bdev = sc->stripe[stripe].dev->bdev;

	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}

static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	struct stripe_c *sc = ti->private;
	struct dax_device *dax_dev;
	struct block_device *bdev;
	uint32_t stripe;

	stripe_map_sector(sc, sector, &stripe, &dev_sector);
	dev_sector += sc->stripe[stripe].physical_start;
	dax_dev = sc->stripe[stripe].dev->dax_dev;
	bdev = sc->stripe[stripe].dev->bdev;

	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}

#else
#define stripe_dax_direct_access NULL
#define stripe_dax_copy_from_iter NULL
#define stripe_dax_copy_to_iter NULL
#endif

/*
 * Stripe status:
 *
 * INFO
 * #stripes [stripe_name <stripe_name>] [group word count]
 * [error count 'A|D' <error count 'A|D'>]
 *
 * TABLE
 * #stripes [stripe chunk size]
 * [stripe_name physical_start <stripe_name physical_start>]
 */
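/*
 * For instance, a healthy two-stripe volume might report (with
 * hypothetical device numbers) "2 8:16 8:32 1 AA" for INFO and
 * "2 256 8:16 0 8:32 0" for TABLE.
 */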

static void stripe_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct stripe_c *sc = (struct stripe_c *) ti->private;
	unsigned int sz = 0;
	unsigned int i;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u ", sc->stripes);
		for (i = 0; i < sc->stripes; i++) {
			DMEMIT("%s ", sc->stripe[i].dev->name);
		}
		DMEMIT("1 ");
		for (i = 0; i < sc->stripes; i++) {
			DMEMIT("%c", atomic_read(&(sc->stripe[i].error_count)) ?
			       'D' : 'A');
		}
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%u %llu", sc->stripes,
		       (unsigned long long)sc->chunk_size);
		for (i = 0; i < sc->stripes; i++)
			DMEMIT(" %s %llu", sc->stripe[i].dev->name,
			       (unsigned long long)sc->stripe[i].physical_start);
		break;
	}
}

static int stripe_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	unsigned i;
	char major_minor[16];
	struct stripe_c *sc = ti->private;

	if (!*error)
		return DM_ENDIO_DONE; /* I/O complete */

	if (bio->bi_opf & REQ_RAHEAD)
		return DM_ENDIO_DONE;

	if (*error == BLK_STS_NOTSUPP)
		return DM_ENDIO_DONE;

	memset(major_minor, 0, sizeof(major_minor));
	sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));

	/*
	 * Test to see which stripe drive triggered the event
	 * and increment error count for all stripes on that device.
	 * If the error count for a given device exceeds the threshold
	 * value we will no longer trigger any further events.
	 */
	for (i = 0; i < sc->stripes; i++) {
		if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
			atomic_inc(&(sc->stripe[i].error_count));
			if (atomic_read(&(sc->stripe[i].error_count)) <
			    DM_IO_ERROR_THRESHOLD)
				schedule_work(&sc->trigger_event);
		}
	}

	return DM_ENDIO_DONE;
}

static int stripe_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct stripe_c *sc = ti->private;
	int ret = 0;
	unsigned i = 0;

	do {
		ret = fn(ti, sc->stripe[i].dev,
			 sc->stripe[i].physical_start,
			 sc->stripe_width, data);
	} while (!ret && ++i < sc->stripes);

	return ret;
}

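/*
 * Export the chunk size as the minimum I/O size and one full stripe
 * (chunk_size * stripes) as the optimal I/O size, so upper layers can
 * align and batch requests to whole stripes.
 */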
static void stripe_io_hints(struct dm_target *ti,
			    struct queue_limits *limits)
{
	struct stripe_c *sc = ti->private;
	unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * sc->stripes);
}

static struct target_type stripe_target = {
	.name = "striped",
	.version = {1, 6, 0},
	.features = DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr = stripe_ctr,
	.dtr = stripe_dtr,
	.map = stripe_map,
	.end_io = stripe_end_io,
	.status = stripe_status,
	.iterate_devices = stripe_iterate_devices,
	.io_hints = stripe_io_hints,
	.direct_access = stripe_dax_direct_access,
	.dax_copy_from_iter = stripe_dax_copy_from_iter,
	.dax_copy_to_iter = stripe_dax_copy_to_iter,
};

int __init dm_stripe_init(void)
{
	int r;

	r = dm_register_target(&stripe_target);
	if (r < 0)
		DMWARN("target registration failed");

	return r;
}

void dm_stripe_exit(void)
{
	dm_unregister_target(&stripe_target);
}