// SPDX-License-Identifier: GPL-2.0-only
/*
 *  scsi.c  Copyright (C) 1992 Drew Eckhardt
 *          Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *          Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Bug correction thanks go to :
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding request, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall  <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/async.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scsi.h>

/*
 * Definitions and constants.
 */

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif
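
/*
 * Example (illustrative sketch, kept out of the build): the logging word
 * packs a small level per event class.  A caller could read the current
 * ML QUEUE level the same way scsi_log_send() below does; the helper name
 * is hypothetical, the macros come from scsi_logging.h.
 */
#if 0
static unsigned int example_mlqueue_level(void)
{
	return SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, SCSI_LOG_MLQUEUE_BITS);
}
#endif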

/*
 * Domain for asynchronous system resume operations.  It is marked 'exclusive'
 * to avoid being included in the async_synchronize_full() that is invoked by
 * dpm_resume().
 */
ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
EXPORT_SYMBOL(scsi_sd_pm_domain);

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands + cmd address
	 *
	 * 3: same as 2
	 *
	 * 4: same as 3
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			scmd_printk(KERN_INFO, cmd,
				    "Send: scmd 0x%p\n", cmd);
			scsi_print_command(cmd);
		}
	}
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scsi_print_result(cmd, "Done", disposition);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) == CHECK_CONDITION)
				scsi_print_sense(cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    scsi_host_busy(cmd->device->host),
					    cmd->device->host->host_failed);
		}
	}
}
#endif

/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	scsi_device_unbusy(sdev, cmd);

	/*
	 * Clear the flags that say that the device/target/host is no longer
	 * capable of accepting new commands.
	 */
	if (atomic_read(&shost->host_blocked))
		atomic_set(&shost->host_blocked, 0);
	if (atomic_read(&starget->target_blocked))
		atomic_set(&starget->target_blocked, 0);
	if (atomic_read(&sdev->device_blocked))
		atomic_set(&sdev->device_blocked, 0);

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	good_bytes = scsi_bufflen(cmd);
	if (!blk_rq_is_passthrough(cmd->request)) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * USB may not give sense identifying bad sector and
		 * simply return a residue instead, so subtract off the
		 * residue if drv->done() error processing indicates no
		 * change to the completion length.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}

/**
 * scsi_change_queue_depth - change a device's queue depth
 * @sdev: SCSI Device in question
 * @depth: number of commands allowed to be queued to the driver
 *
 * Sets the device queue depth and returns the new value.
 */
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
	if (depth > 0) {
		sdev->queue_depth = depth;
		wmb();
	}

	if (sdev->request_queue)
		blk_set_queue_depth(sdev->request_queue, depth);

	return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
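
/*
 * Example (illustrative sketch, kept out of the build): a low-level driver
 * typically wires this helper into its host template's ->change_queue_depth
 * method, either directly or through a thin wrapper that first applies a
 * hardware limit.  The wrapper and the clamp value of 64 are hypothetical.
 */
#if 0
static int example_lld_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > 64)
		qdepth = 64;
	return scsi_change_queue_depth(sdev, qdepth);
}
#endif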

/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description: This function will track successive QUEUE_FULL events on a
 *              specific SCSI device to determine if and when there is a
 *              need to adjust the queue depth on the device.
 *
 * Returns:     0 - No change needed, >0 - Adjust queue depth to this new depth,
 *              -1 - Drop back to untagged operation using host->cmd_per_lun
 *                   as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{

	/*
	 * Don't let QUEUE_FULLs on the same
	 * jiffies count, they could all be from
	 * same event.
	 */
	if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
		return 0;

	sdev->last_queue_full_time = jiffies;
	if (sdev->last_queue_full_depth != depth) {
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	if (sdev->last_queue_full_count <= 10)
		return 0;

	return scsi_change_queue_depth(sdev, depth);
}
EXPORT_SYMBOL(scsi_track_queue_full);
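
/*
 * Example (illustrative sketch, kept out of the build): a low-level driver
 * that has just seen a command complete with QUEUE FULL status hands the
 * number of commands still outstanding on that device (excluding the
 * rejected one) to scsi_track_queue_full().  The 'outstanding' counter is a
 * hypothetical value maintained by the driver.
 */
#if 0
static void example_handle_queue_full(struct scsi_cmnd *cmd, int outstanding)
{
	if (scsi_track_queue_full(cmd->device, outstanding) > 0)
		scmd_printk(KERN_INFO, cmd, "queue depth adjusted\n");
}
#endif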

/**
 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 * @sdev: The device to ask
 * @buffer: Where to put the result
 * @page: Which Vital Product Data to return
 * @len: The length of the buffer
 *
 * This is an internal helper function.  You probably want to use
 * scsi_get_vpd_page instead.
 *
 * Returns size of the vpd page on success or a negative error number.
 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
			    u8 page, unsigned len)
{
	int result;
	unsigned char cmd[16];

	if (len < 4)
		return -EINVAL;

	cmd[0] = INQUIRY;
	cmd[1] = 1;		/* EVPD */
	cmd[2] = page;
	cmd[3] = len >> 8;
	cmd[4] = len & 0xff;
	cmd[5] = 0;		/* Control byte */

	/*
	 * I'm not convinced we need to try quite this hard to get VPD, but
	 * all the existing users tried this hard.
	 */
	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
				  len, NULL, 30 * HZ, 3, NULL);
	if (result)
		return -EIO;

	/* Sanity check that we got the page back that we asked for */
	if (buffer[1] != page)
		return -EIO;

	return get_unaligned_be16(&buffer[2]) + 4;
}

/**
 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 * @buf: where to store the VPD
 * @buf_len: number of bytes in the VPD buffer area
 *
 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
 * If the device supports this VPD page, this routine fills @buf with
 * the data from that page and returns 0.  If the page cannot be
 * retrieved (for example because the device does not claim support for
 * it), a negative error number is returned and the contents of @buf
 * are undefined.
 */
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
		      int buf_len)
{
	int i, result;

	if (sdev->skip_vpd_pages)
		goto fail;

	/* Ask for all the pages supported by this device */
	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
	if (result < 4)
		goto fail;

	/* If the user actually wanted this page, we can skip the rest */
	if (page == 0)
		return 0;

	for (i = 4; i < min(result, buf_len); i++)
		if (buf[i] == page)
			goto found;

	if (i < result && i >= buf_len)
		/* ran off the end of the buffer, give us benefit of doubt */
		goto found;
	/* The device claims it doesn't support the requested page */
	goto fail;

 found:
	result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
	if (result < 0)
		goto fail;

	return 0;

 fail:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
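
/*
 * Example (illustrative sketch, kept out of the build): fetching the Unit
 * Serial Number page (0x80) into a caller-supplied buffer.  The 255-byte
 * buffer size and the printk are arbitrary choices for the illustration.
 */
#if 0
static void example_read_serial_vpd(struct scsi_device *sdev)
{
	unsigned char *buf = kmalloc(255, GFP_KERNEL);

	if (!buf)
		return;
	if (!scsi_get_vpd_page(sdev, 0x80, buf, 255))
		sdev_printk(KERN_INFO, sdev, "VPD page 0x80 retrieved\n");
	kfree(buf);
}
#endif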

/**
 * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 *
 * Returns %NULL upon failure.
 */
static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
{
	struct scsi_vpd *vpd_buf;
	int vpd_len = SCSI_VPD_PG_LEN, result;

retry_pg:
	vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
	if (!vpd_buf)
		return NULL;

	result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len);
	if (result < 0) {
		kfree(vpd_buf);
		return NULL;
	}
	if (result > vpd_len) {
		vpd_len = result;
		kfree(vpd_buf);
		goto retry_pg;
	}

	vpd_buf->len = result;

	return vpd_buf;
}

static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
				 struct scsi_vpd __rcu **sdev_vpd_buf)
{
	struct scsi_vpd *vpd_buf;

	vpd_buf = scsi_get_vpd_buf(sdev, page);
	if (!vpd_buf)
		return;

	mutex_lock(&sdev->inquiry_mutex);
	vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf,
				      lockdep_is_held(&sdev->inquiry_mutex));
	mutex_unlock(&sdev->inquiry_mutex);

	if (vpd_buf)
		kfree_rcu(vpd_buf, rcu);
}

/**
 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
 * @sdev: The device to ask
 *
 * Attach the 'Device Identification' VPD page (0x83) and the
 * 'Unit Serial Number' VPD page (0x80) to a SCSI device
 * structure.  This information can be used to identify the device
 * uniquely.
 */
void scsi_attach_vpd(struct scsi_device *sdev)
{
	int i;
	struct scsi_vpd *vpd_buf;

	if (!scsi_device_supports_vpd(sdev))
		return;

	/* Ask for all the pages supported by this device */
	vpd_buf = scsi_get_vpd_buf(sdev, 0);
	if (!vpd_buf)
		return;

	for (i = 4; i < vpd_buf->len; i++) {
		if (vpd_buf->data[i] == 0x0)
			scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
		if (vpd_buf->data[i] == 0x80)
			scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
		if (vpd_buf->data[i] == 0x83)
			scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
		if (vpd_buf->data[i] == 0x89)
			scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
	}
	kfree(vpd_buf);
}
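
/*
 * Example (illustrative sketch, kept out of the build): once scsi_attach_vpd()
 * has populated the cached pages, readers access them under RCU.  This mirrors
 * how upper-level drivers consume sdev->vpd_pg80; the printk is purely
 * illustrative.
 */
#if 0
static void example_peek_cached_serial(struct scsi_device *sdev)
{
	struct scsi_vpd *vpd;

	rcu_read_lock();
	vpd = rcu_dereference(sdev->vpd_pg80);
	if (vpd)
		sdev_printk(KERN_INFO, sdev, "cached VPD page 0x80, %d bytes\n",
			    vpd->len);
	rcu_read_unlock();
}
#endif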

/**
 * scsi_report_opcode - Find out if a given command opcode is supported
 * @sdev: scsi device to query
 * @buffer: scratch buffer (must be at least 20 bytes long)
 * @len: length of buffer
 * @opcode: opcode for command to look up
 *
 * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
 * opcode.  Returns -EINVAL if RSOC fails, 0 if the command opcode is
 * unsupported and 1 if the device claims to support the command.
 */
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
		       unsigned int len, unsigned char opcode)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int result;

	if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
		return -EINVAL;

	memset(cmd, 0, 16);
	cmd[0] = MAINTENANCE_IN;
	cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
	cmd[2] = 1;		/* One command format */
	cmd[3] = opcode;
	put_unaligned_be32(len, &cmd[6]);
	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  &sshdr, 30 * HZ, 3, NULL);

	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == ILLEGAL_REQUEST &&
	    (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
		return -EINVAL;

	if ((buffer[1] & 3) == 3) /* Command supported */
		return 1;

	return 0;
}
EXPORT_SYMBOL(scsi_report_opcode);
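
/*
 * Example (illustrative sketch, kept out of the build): probing whether a
 * device claims support for WRITE SAME (16), similar to what upper-level
 * drivers do before issuing the command.  The 512-byte scratch buffer size
 * is an arbitrary choice.
 */
#if 0
static bool example_supports_write_same_16(struct scsi_device *sdev)
{
	unsigned char *buf = kmalloc(512, GFP_KERNEL);
	bool supported = false;

	if (!buf)
		return false;
	if (scsi_report_opcode(sdev, buf, 512, WRITE_SAME_16) == 1)
		supported = true;
	kfree(buf);
	return supported;
}
#endif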

/**
 * scsi_device_get - get an additional reference to a scsi_device
 * @sdev: device to get a reference to
 *
 * Description: Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 *
 * This will fail if a device is deleted or cancelled, or when the LLD module
 * is in the process of being unloaded.
 */
int scsi_device_get(struct scsi_device *sdev)
{
	if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
		goto fail;
	if (!get_device(&sdev->sdev_gendev))
		goto fail;
	if (!try_module_get(sdev->host->hostt->module))
		goto fail_put_device;
	return 0;

fail_put_device:
	put_device(&sdev->sdev_gendev);
fail:
	return -ENXIO;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put - release a reference to a scsi_device
 * @sdev: device to release a reference on.
 *
 * Description: Release a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
	struct module *mod = sdev->host->hostt->module;

	put_device(&sdev->sdev_gendev);
	module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
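
/*
 * Example (illustrative sketch, kept out of the build): the usual get/put
 * pairing around a period where a caller needs the scsi_device (and its LLDD
 * module) to stay alive.  example_use_device() is a hypothetical helper
 * standing in for whatever work needs the device pinned.
 */
#if 0
static void example_pin_device(struct scsi_device *sdev)
{
	if (scsi_device_get(sdev))
		return;	/* device going away or LLDD unloading */
	example_use_device(sdev);
	scsi_device_put(sdev);
}
#endif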

/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
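
/*
 * Example (illustrative sketch, kept out of the build): the iterator above is
 * normally used through the shost_for_each_device() macro, which takes and
 * drops the per-device references automatically as the loop advances.
 */
#if 0
static void example_log_devices(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		sdev_printk(KERN_INFO, sdev, "present\n");
}
#endif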

/**
 * starget_for_each_device - helper to walk all devices of a target
 * @starget: target whose devices we want to iterate over.
 * @data: Opaque passed to each function call.
 * @fn: Function to call on each device
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
			     void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(starget_for_each_device);
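
/*
 * Example (illustrative sketch, kept out of the build): a per-device callback
 * suitable for starget_for_each_device().  It simply counts devices through
 * the opaque @data pointer; the counter is hypothetical.
 */
#if 0
static void example_count_device(struct scsi_device *sdev, void *data)
{
	unsigned int *count = data;

	(*count)++;
}
#endif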

/**
 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
 * @starget: target whose devices we want to iterate over.
 * @data: parameter for callback @fn()
 * @fn: callback function that is invoked for each device
 *
 * This traverses over each device of @starget.  It does _not_
 * take a reference on the scsi_device, so the whole loop must be
 * protected by shost->host_lock.
 *
 * Note: The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use starget_for_each_device instead.
 **/
void __starget_for_each_device(struct scsi_target *starget, void *data,
			       void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	__shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(__starget_for_each_device);

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget: SCSI target pointer
 * @lun: SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device.  A scsi_device in state
 * SDEV_DEL is skipped.
 *
 * Note: The only reason why drivers should use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
						   u64 lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		if (sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget: SCSI target pointer
 * @lun: SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
						 u64 lun)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup_by_target(starget, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost: SCSI host pointer
 * @channel: SCSI channel (zero if only one channel)
 * @id: SCSI target number (physical unit number)
 * @lun: SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any access
 * to the returned scsi_device.
 *
 * Note: The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
					 uint channel, uint id, u64 lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		if (sdev->channel == channel && sdev->id == id &&
		    sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost: SCSI host pointer
 * @channel: SCSI channel (zero if only one channel)
 * @id: SCSI target number (physical unit number)
 * @lun: SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
				       uint channel, uint id, u64 lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
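
/*
 * Example (illustrative sketch, kept out of the build): looking up channel 0,
 * target 1, LUN 0 on a host and dropping the reference when done.  The
 * addressing values are arbitrary.
 */
#if 0
static void example_find_device(struct Scsi_Host *shost)
{
	struct scsi_device *sdev = scsi_device_lookup(shost, 0, 1, 0);

	if (!sdev)
		return;
	sdev_printk(KERN_INFO, sdev, "found\n");
	scsi_device_put(sdev);
}
#endif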

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
	int error;

	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}

static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);