/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);
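
/*
 * Illustrative sketch (not part of this header): a minimal constructor/
 * destructor pair for a hypothetical single-device target.  The type
 * "example_c" and the function names are invented for illustration;
 * dm_get_device(), dm_put_device() and dm_table_get_mode() are declared
 * further down in this header.
 *
 *	struct example_c {
 *		struct dm_dev *dev;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_c *ec;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */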

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later (DM_MAPIO_SUBMITTED)
 * = 1: simple remap complete (DM_MAPIO_REMAPPED)
 * = 2: The target wants to push back the io (DM_MAPIO_REQUEUE)
 *
 * (The DM_MAPIO_* constants are defined at the bottom of this header.)
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
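
/*
 * Illustrative sketch: a pass-through ->map that remaps a bio onto the
 * start of an underlying device, in the style of a linear target.
 * "example_c" is the hypothetical context from the constructor sketch
 * above; dm_target_offset() and DM_MAPIO_REMAPPED appear later in this
 * header, bio_set_dev() and bio_sectors() in <linux/bio.h>.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		if (bio_sectors(bio))
 *			bio->bi_iter.bi_sector =
 *				dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */
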
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully (DM_ENDIO_DONE)
 * 1   : for some reason the io has still not completed (e.g. a
 *       multipath target might want to requeue a failed io)
 *       (DM_ENDIO_INCOMPLETE)
 * 2   : The target wants to push back the io (DM_ENDIO_REQUEUE)
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of the device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * It returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
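
/*
 * Illustrative sketch: a callout that reports whether a given underlying
 * device is rotational, combined over all devices via ->iterate_devices.
 * The function name is hypothetical; bdev_get_queue() and
 * blk_queue_nonrot() come from <linux/blkdev.h>.
 *
 *	static int device_is_rotational(struct dm_target *ti,
 *					struct dm_dev *dev,
 *					sector_t start, sector_t len,
 *					void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && !blk_queue_nonrot(q);
 *	}
 *
 * Then "does any underlying device spin?" is simply:
 *
 *	ti->type->iterate_devices(ti, device_is_rotational, NULL)
 */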

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
#ifdef CONFIG_BLK_DEV_ZONED
	dm_report_zones_fn report_zones;
#endif
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_iter_fn dax_copy_from_iter;
	dm_dax_copy_iter_fn dax_copy_to_iter;
	dm_dax_zero_page_range_fn dax_zero_page_range;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target, even immutable targets.
 * .map, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length flush bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_secure_erase_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
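
/*
 * Illustrative sketch: per-bio data is reserved by setting
 * ti->per_io_data_size in the constructor and retrieved with
 * dm_per_bio_data() from ->map or ->end_io.  "struct example_pb" is
 * hypothetical.
 *
 *	struct example_pb {
 *		unsigned long start_jiffies;
 *	};
 *
 * In the constructor:
 *
 *	ti->per_io_data_size = sizeof(struct example_pb);
 *
 * In ->map (and symmetrically in ->end_io):
 *
 *	struct example_pb *pb = dm_per_bio_data(bio, sizeof(*pb));
 *
 *	pb->start_jiffies = jiffies;
 */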

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
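
/*
 * Illustrative sketch: registering a target type from module init/exit.
 * All "example_*" names are hypothetical and refer to the sketches above.
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */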

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
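
/*
 * Illustrative sketch: parsing "<dev> <n> <feature args>..." style
 * constructor arguments inside a ctr.  The bounds and error strings are
 * invented for illustration.
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 4, "Invalid number of feature arguments"},
 *	};
 *
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	const char *dev_path = dm_shift_arg(&as);
 *	unsigned num_features;
 *	int r;
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 *
 *	while (num_features--) {
 *		const char *feature = dm_shift_arg(&as);
 *		// interpret each feature argument here
 *	}
 */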

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
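
/*
 * Illustrative sketch of dm_accept_partial_bio() from within ->map: accept
 * only the sectors up to some per-target boundary; device-mapper then
 * resubmits the remainder as a fresh bio.  "boundary" is a hypothetical
 * sector count.
 *
 *	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
 *	unsigned remaining = boundary - offset;
 *
 *	if (bio_sectors(bio) > remaining)
 *		dm_accept_partial_bio(bio, remaining);
 */
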
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * A target can use this to set its table's type.
 * It can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (those supporting both bio-based
 * and request-based operation).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);
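
/*
 * Illustrative sketch of building a table (in-kernel callers such as
 * dm_early_create() follow this pattern).  "md" is an existing
 * mapped_device; the "linear" parameters are invented, and
 * dm_table_destroy() is declared just below.
 *
 *	struct dm_table *t;
 *	int r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *
 *	if (!r)
 *		r = dm_table_add_target(t, "linear", 0, 1024, "/dev/sda 0");
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r && t)
 *		dm_table_destroy(t);
 */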

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);

/*
 * A target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
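
/*
 * Illustrative sketch: DMEMIT() expects local variables named sz, result
 * and maxlen; a ->status method receives result and maxlen and must
 * declare and zero sz itself.  "example_c" is the hypothetical context
 * from the sketches above.
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned status_flags, char *result,
 *				   unsigned maxlen)
 *	{
 *		struct example_c *ec = ti->private;
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s", ec->dev->name);
 *			break;
 *		}
 *	}
 */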

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
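
/*
 * Worked examples of the arithmetic helpers above:
 *	dm_div_up(10, 4)	 == 3	(ceiling of 10/4)
 *	dm_round_up(10, 4)	 == 12	(10 rounded up to a multiple of 4)
 *	dm_sector_div_up(1025, 8) == 129 (same ceiling, for sector_t)
 * dm_sector_div64(x, y) divides x by y in place and evaluates to the
 * remainder: with u64 x = 1030, dm_sector_div64(x, 512) yields 6 and
 * leaves x == 2.
 */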

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif /* _LINUX_DEVICE_MAPPER_H */