// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

#define AAC_DEBUG_PREAMBLE	KERN_INFO
#define AAC_DEBUG_POSTAMBLE

/**
 * ioctl_send_fib - send a FIB from userspace
 * @dev: adapter being processed
 * @arg: arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program.
 */
static int ioctl_send_fib(struct aac_dev *dev, void __user *arg)
{
	struct hw_fib *kfib;
	struct fib *fibptr;
	struct hw_fib *hw_fib = NULL;
	dma_addr_t hw_fib_pa = 0;
	unsigned int size, osize;
	int retval;

	if (dev->in_reset)
		return -EBUSY;

	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL)
		return -ENOMEM;

	kfib = fibptr->hw_fib_va;
	/*
	 * First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 * Since we copy based on the fib header size, make sure that we
	 * will not overrun the buffer when we copy the memory. Return
	 * an error if we would.
	 */
	osize = size = le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
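	/*
	 * The adapter's reply can be larger than the request, so honour
	 * the sender's advertised buffer size (SenderSize) when sizing
	 * the kernel copy.
	 */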
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
					  GFP_KERNEL);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/* Hijack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0,
		       size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	/* Sanity check the second copy */
	if ((osize != le16_to_cpu(kfib->header.Size) +
			sizeof(struct aac_fibhdr))
		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
		retval = -EINVAL;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state
		 * to allow cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (retval)
			goto cleanup;

		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 * Make sure that the size returned by the adapter (which includes
	 * the header) is less than or equal to the size of a fib, so we
	 * don't corrupt application data. Then copy that size to the user
	 * buffer. (Don't try to add the header information again, since it
	 * was already included by the adapter.)
	 */

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		dma_free_coherent(&dev->pdev->dev, size, kfib,
				  fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}

/**
 * open_getadapter_fib - allocate a new AdapterFibContext
 * @dev: adapter being processed
 * @arg: user pointer that receives the context's unique handle
 *
 * This routine allocates a new AdapterFibContext, registers it on the
 * adapter's list and returns its unique handle to the user. Subsequent
 * FSACTL_GET_NEXT_ADAPTER_FIB calls use that handle to retrieve fibs.
 */
static int open_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head *entry;
		struct aac_fib_context *context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 * Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 * Initialize the completion used to wait for the next AIF.
		 */
		init_completion(&fibctx->completion);
		fibctx->wait = 0;
		/*
		 * Initialize the fibs and set the count of fibs on
		 * the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
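		/*
		 * Record the creation time in seconds (jiffies/HZ, not raw
		 * jiffies); the AIF delivery path appears to use this
		 * timestamp to age out contexts whose consumer has stopped
		 * collecting fibs.
		 */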
		fibctx->jiffies = jiffies/HZ;
		/*
		 * Now add this context onto the adapter's
		 * AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context,
					     next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
				 sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}

/**
 * next_getadapter_fib - get the next fib
 * @dev: adapter to use
 * @arg: ioctl argument
 *
 * This routine will get the next Fib, if available, from the
 * AdapterFibContext passed in from the user.
 */
static int next_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head *entry;
	unsigned long flags;

	if (copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
	    (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 * If there are no fibs to send back, then either wait or return
	 * -EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va,
				 sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}

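/**
 * aac_close_fib_context - tear down an AdapterFibContext
 * @dev: adapter owning the context
 * @fibctx: context to close
 *
 * Frees any queued fibs that were never consumed, unlinks the context
 * from the adapter's list, invalidates it and frees it. The callers in
 * this file hold dev->fib_lock around the call.
 */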
int aac_close_fib_context(struct aac_dev *dev, struct aac_fib_context *fibctx)
{
	struct fib *fib;

	/*
	 * First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head *entry;
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 * Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 * Invalidate context
	 */
	fibctx->type = 0;
	/*
	 * Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 * close_getadapter_fib - close down user fib context
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine will close down the fibctx passed in from the user.
 */
static int close_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head *entry;

	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */

	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
	    (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}

/**
 * check_revision - report the driver version
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine returns the driver version.
 * Under Linux, there have been no version incompatibilities, so this is
 * simple!
 */
static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

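	/*
	 * Pack the dotted "major.minor.patch" driver version string into
	 * one 32-bit word: major in bits 31-24, minor in bits 23-16, and
	 * the low 16 bits carrying 0x0400 plus the patch level.
	 */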
	response.compat = 1;
	version = (simple_strtol(driver_version,
				 &driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
	response.version = cpu_to_le32(version);
#ifdef AAC_DRIVER_BUILD
	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
#else
	response.build = cpu_to_le32(9999);
#endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}

/**
 * aac_send_raw_srb - send a user-supplied SRB straight to the adapter
 * @dev: adapter
 * @arg: ioctl arguments
 */
static int aac_send_raw_srb(struct aac_dev *dev, void __user *arg)
{
	struct fib *srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct aac_hba_cmd_req *hbacmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	u32 chn;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[HBA_MAX_SG_EMBEDDED];
	void *sg_list[HBA_MAX_SG_EMBEDDED];
	u32 sg_count[HBA_MAX_SG_EMBEDDED];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;
	int is_native_device;
	u64 address;

	if (dev->in_reset) {
		dprintk((KERN_DEBUG "aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		dprintk((KERN_DEBUG "aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 * Allocate and initialize a Fib then setup a SRB command
	 */
	srbfib = aac_fib_alloc(dev);
	if (!srbfib)
		return -ENOMEM;

	/* Zero sg_list so cleanup only frees buffers we actually allocated */
	memset(sg_list, 0, sizeof(sg_list));
	if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
		rcode = -EINVAL;
		goto cleanup;
	}

	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
	if (!user_srbcmd) {
		dprintk((KERN_DEBUG "aacraid: Could not make a copy of the srb\n"));
		rcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(user_srbcmd, user_srb, fibsize)) {
		dprintk((KERN_DEBUG "aacraid: Could not copy srb from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	flags = user_srbcmd->flags; /* from user in cpu order */
	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG "aacraid: too many sg entries %d\n",
			 user_srbcmd->sg.count));
		rcode = -EINVAL;
		goto cleanup;
	}
	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG "aacraid: SG with no direction specified\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
		(sizeof(struct sgentry64) - sizeof(struct sgentry));
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG "aacraid: Bad Size specified in "
			"Raw SRB command calculated fibsize=%u;%u "
			"user_srbcmd->sg.count=%d aac_srb=%zu sgentry=%zu;%zu "
			"issued fibsize=%d\n",
			actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
			sizeof(struct aac_srb), sizeof(struct sgentry),
			sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}
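	/*
	 * The ioctl carries no explicit SG-format flag: whether the
	 * user-supplied size matches the 32-bit or the 64-bit sgentry
	 * layout (actual_fibsize vs actual_fibsize64) tells us which
	 * format user space passed in.
	 */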

	chn = user_srbcmd->channel;
	if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
	    dev->hba_map[chn][user_srbcmd->id].devtype ==
	    AAC_DEVTYPE_NATIVE_RAW) {
		is_native_device = 1;
		hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
		/*
		 * Clearing the fixed 96-byte header is sufficient;
		 * zeroing the full sizeof(*hbacmd) is not necessary.
		 */
		memset(hbacmd, 0, 96);

		/* iu_type is a parameter of aac_hba_send */
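		/*
		 * byte1 encodes the data direction in the HBA IU:
		 * 2 for host-to-device transfers, 1 for device-to-host
		 * (and bidirectional), 0 when no data moves.
		 */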
		switch (data_dir) {
		case DMA_TO_DEVICE:
			hbacmd->byte1 = 2;
			break;
		case DMA_FROM_DEVICE:
		case DMA_BIDIRECTIONAL:
			hbacmd->byte1 = 1;
			break;
		case DMA_NONE:
		default:
			break;
		}
		hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
		hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;

		/*
		 * we fill in reply_qid later in aac_src_deliver_message
		 * we fill in iu_type, request_id later in aac_hba_send
		 * we fill in emb_data_desc_count, data_length later
		 * in sg list build
		 */

		memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));

		address = (u64)srbfib->hw_error_pa;
		hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
		hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
		hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
		hbacmd->emb_data_desc_count =
			cpu_to_le32(user_srbcmd->sg.count);
		srbfib->hbacmd_size = 64 +
			user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);

	} else {
		is_native_device = 0;
		aac_fib_init(srbfib);

		/* raw_srb FIB is not FastResponseCapable */
		srbfib->hw_fib_va->header.XferState &=
			~cpu_to_le32(FastResponseCapable);

		srbcmd = (struct aac_srb *)fib_data(srbfib);

		/* Fix up srb for endian and force some values */

		srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); /* Force this */
		srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
		srbcmd->id = cpu_to_le32(user_srbcmd->id);
		srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
		srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
		srbcmd->flags = cpu_to_le32(flags);
		srbcmd->retry_limit = 0; /* Obsolete parameter */
		srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
		memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
	}

	byte_count = 0;
	if (is_native_device) {
		struct user_sgmap *usg32 = &user_srbcmd->sg;
		struct user_sgmap64 *usg64 =
			(struct user_sgmap64 *)&user_srbcmd->sg;

		for (i = 0; i < usg32->count; i++) {
			void *p;
			u64 addr;

			sg_count[i] = (actual_fibsize64 == fibsize) ?
				usg64->sg[i].count : usg32->sg[i].count;
			if (sg_count[i] >
			    (dev->scsi_host_ptr->max_sectors << 9)) {
				pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
				       i, sg_count[i],
				       dev->scsi_host_ptr->max_sectors << 9);
				rcode = -EINVAL;
				goto cleanup;
			}

			p = kmalloc(sg_count[i], GFP_KERNEL);
			if (!p) {
				rcode = -ENOMEM;
				goto cleanup;
			}

			if (actual_fibsize64 == fibsize) {
				addr = (u64)usg64->sg[i].addr[0];
				addr += ((u64)usg64->sg[i].addr[1]) << 32;
			} else {
				addr = (u64)usg32->sg[i].addr;
			}

			sg_user[i] = (void __user *)(uintptr_t)addr;
			sg_list[i] = p; /* save so we can clean up later */
			sg_indx = i;

			if (flags & SRB_DataOut) {
				if (copy_from_user(p, sg_user[i],
						   sg_count[i])) {
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			addr = pci_map_single(dev->pdev, p, sg_count[i],
					      data_dir);
			hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
			hbacmd->sge[i].addr_lo = cpu_to_le32(
						(u32)(addr & 0xffffffff));
			hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
			hbacmd->sge[i].flags = 0;
			byte_count += sg_count[i];
		}

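		/*
		 * Assumption: the 0x40000000 flag on the final element
		 * marks the end of the embedded SG list for the firmware.
		 */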
		if (usg32->count > 0) /* embedded sglist */
			hbacmd->sge[usg32->count-1].flags =
				cpu_to_le32(0x40000000);
		hbacmd->data_length = cpu_to_le32(byte_count);

		status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
				      NULL, NULL);

	} else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64 *upsg = (struct user_sgmap64 *)&user_srbcmd->sg;
		struct sgmap64 *psg = (struct sgmap64 *)&srbcmd->sg;

		/*
		 * This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			struct user_sgmap *usg;

			usg = kmemdup(upsg,
				      actual_fibsize - sizeof(struct aac_srb)
				      + sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG "aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			actual_fibsize = actual_fibsize64;

			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					kfree(usg);
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, usg->count));
					kfree(usg);
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						kfree(usg);
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
			kfree(usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand64, srbfib,
				      actual_fibsize, FsaNormal, 1, 1,
				      NULL, NULL);
	} else {
		struct user_sgmap *upsg = &user_srbcmd->sg;
		struct sgmap *psg = &srbcmd->sg;

		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64 *usg = (struct user_sgmap64 *)upsg;

			for (i = 0; i < upsg->count; i++) {
				uintptr_t addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      usg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand, srbfib,
				      actual_fibsize, FsaNormal, 1, 1,
				      NULL, NULL);
	}

	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG "aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for (i = 0; i <= sg_indx; i++) {
			if (copy_to_user(sg_user[i], sg_list[i],
					 sg_count[i])) {
				dprintk((KERN_DEBUG "aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;
			}
		}
	}

	user_reply = arg + fibsize;
	if (is_native_device) {
		struct aac_hba_resp *err =
			&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
		struct aac_srb_reply reply;

		memset(&reply, 0, sizeof(reply));
		reply.status = ST_OK;
		if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
			/* fast response */
			reply.srb_status = SRB_STATUS_SUCCESS;
			reply.scsi_status = 0;
			reply.data_xfer_length = byte_count;
			reply.sense_data_size = 0;
			memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
		} else {
			reply.srb_status = err->service_response;
			reply.scsi_status = err->status;
			reply.data_xfer_length = byte_count -
				le32_to_cpu(err->residual_count);
			reply.sense_data_size = err->sense_response_data_len;
			memcpy(reply.sense_data, err->sense_response_buf,
			       AAC_SENSE_BUFFERSIZE);
		}
		if (copy_to_user(user_reply, &reply,
				 sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG "aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	} else {
		struct aac_srb_reply *reply;

		reply = (struct aac_srb_reply *)fib_data(srbfib);
		if (copy_to_user(user_reply, reply,
				 sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG "aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	}

cleanup:
	kfree(user_srbcmd);
	if (rcode != -ERESTARTSYS) {
		for (i = 0; i <= sg_indx; i++)
			kfree(sg_list[i]);
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}

	return rcode;
}

struct aac_pci_info {
	u32 bus;
	u32 slot;
};

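/**
 * aac_get_pci_info - report the adapter's PCI bus and slot
 * @dev: adapter
 * @arg: user buffer that receives a struct aac_pci_info
 */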
static int aac_get_pci_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}

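/**
 * aac_get_hba_info - report PCI identity details for the adapter
 * @dev: adapter
 * @arg: user buffer that receives a struct aac_hba_info
 */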
static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_hba_info hbainfo;

	memset(&hbainfo, 0, sizeof(hbainfo));
	hbainfo.adapter_number = (u8)dev->id;
	hbainfo.system_io_bus_number = dev->pdev->bus->number;
	hbainfo.device_number = (dev->pdev->devfn >> 3);
	hbainfo.function_number = (dev->pdev->devfn & 0x0007);

	hbainfo.vendor_id = dev->pdev->vendor;
	hbainfo.device_id = dev->pdev->device;
	hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor;
	hbainfo.sub_system_id = dev->pdev->subsystem_device;

	if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
		return -EFAULT;
	}

	return 0;
}

struct aac_reset_iop {
	u8 reset_type;
};

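/**
 * aac_send_reset_adapter - reset the adapter on behalf of user space
 * @dev: adapter
 * @arg: user pointer to a struct aac_reset_iop selecting the reset type
 *
 * Marks the adapter as shutting down, then drops ioctl_mutex across the
 * potentially long-running reset; since adapter_shutdown is already set,
 * concurrent ioctls fail fast with -EACCES instead of queueing behind
 * the reset.
 */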
static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
{
	struct aac_reset_iop reset;
	int retval;

	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
		return -EFAULT;

	dev->adapter_shutdown = 1;

	mutex_unlock(&dev->ioctl_mutex);
	retval = aac_reset_adapter(dev, 0, reset.reset_type);
	mutex_lock(&dev->ioctl_mutex);

	return retval;
}

int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
{
	int status;

	mutex_lock(&dev->ioctl_mutex);

	if (dev->adapter_shutdown) {
		status = -EACCES;
		goto cleanup;
	}

	/*
	 * HBA gets first crack
	 */

	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		goto cleanup;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	case FSACTL_GET_HBA_INFO:
		status = aac_get_hba_info(dev, arg);
		break;
	case FSACTL_RESET_IOP:
		status = aac_send_reset_adapter(dev, arg);
		break;

	default:
		status = -ENOTTY;
		break;
	}

cleanup:
	mutex_unlock(&dev->ioctl_mutex);

	return status;
}
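
/*
 * A minimal sketch of how user space drives the AIF ioctls above,
 * assuming the aacraid control node is /dev/aac0 (the node name is an
 * assumption; only the FSACTL_* flow is taken from this file):
 *
 *	int fd = open("/dev/aac0", O_RDWR);
 *	u32 handle;
 *	struct fib_ioctl f;
 *
 *	ioctl(fd, FSACTL_OPEN_GET_ADAPTER_FIB, &handle);
 *	f.fibctx = handle;
 *	f.wait = 1;                             // block until an AIF arrives
 *	f.fib = malloc(sizeof(struct hw_fib));  // receives the raw fib
 *	ioctl(fd, FSACTL_GET_NEXT_ADAPTER_FIB, &f);
 *	ioctl(fd, FSACTL_CLOSE_GET_ADAPTER_FIB, (void *)(uintptr_t)handle);
 */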
| 1108 | |