// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell 88SE64xx/88SE94xx pci init
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 */


#include "mv_sas.h"

int interrupt_coalescing = 0x80;

static struct scsi_transport_template *mvs_stt;
static const struct mvs_chip_info mvs_chips[] = {
        [chip_6320] =   { 1, 2, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
        [chip_6440] =   { 1, 4, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
        [chip_6485] =   { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
        [chip_9180] =   { 2, 4, 0x800, 17, 64, 8,  9, &mvs_94xx_dispatch, },
        [chip_9480] =   { 2, 4, 0x800, 17, 64, 8,  9, &mvs_94xx_dispatch, },
        [chip_9445] =   { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
        [chip_9485] =   { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
        [chip_1300] =   { 1, 4, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
        [chip_1320] =   { 2, 4, 0x800, 17, 64, 8,  9, &mvs_94xx_dispatch, },
};

struct device_attribute *mvst_host_attrs[];

#define SOC_SAS_NUM 2

static struct scsi_host_template mvs_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .queuecommand           = sas_queuecommand,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = sas_slave_configure,
        .scan_finished          = mvs_scan_finished,
        .scan_start             = mvs_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
        .bios_param             = sas_bios_param,
        .can_queue              = 1,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
        .eh_device_reset_handler = sas_eh_device_reset_handler,
        .eh_target_reset_handler = sas_eh_target_reset_handler,
        .target_destroy         = sas_target_destroy,
        .ioctl                  = sas_ioctl,
        .shost_attrs            = mvst_host_attrs,
        .track_queue_depth      = 1,
};

static struct sas_domain_function_template mvs_transport_ops = {
        .lldd_dev_found         = mvs_dev_found,
        .lldd_dev_gone          = mvs_dev_gone,
        .lldd_execute_task      = mvs_queue_command,
        .lldd_control_phy       = mvs_phy_control,

        .lldd_abort_task        = mvs_abort_task,
        .lldd_abort_task_set    = mvs_abort_task_set,
        .lldd_clear_aca         = mvs_clear_aca,
        .lldd_clear_task_set    = mvs_clear_task_set,
        .lldd_I_T_nexus_reset   = mvs_I_T_nexus_reset,
        .lldd_lu_reset          = mvs_lu_reset,
        .lldd_query_task        = mvs_query_task,
        .lldd_port_formed       = mvs_port_formed,
        .lldd_port_deformed     = mvs_port_deformed,

        .lldd_write_gpio        = mvs_gpio_write,

};

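/*
 * mvs_phy_init - reset one phy's libsas fields to their power-on defaults
 * and point it back at its owning mvs_info.
 */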
static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
        struct mvs_phy *phy = &mvi->phy[phy_id];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;

        phy->mvi = mvi;
        phy->port = NULL;
        timer_setup(&phy->timer, NULL, 0);
        sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
        sas_phy->class = SAS;
        sas_phy->iproto = SAS_PROTOCOL_ALL;
        sas_phy->tproto = 0;
        sas_phy->type = PHY_TYPE_PHYSICAL;
        sas_phy->role = PHY_ROLE_INITIATOR;
        sas_phy->oob_mode = OOB_NOT_CONNECTED;
        sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;

        sas_phy->id = phy_id;
        sas_phy->sas_addr = &mvi->sas_addr[0];
        sas_phy->frame_rcvd = &phy->frame_rcvd[0];
        sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
        sas_phy->lldd_phy = phy;
}

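/*
 * mvs_free - tear down everything mvs_alloc() set up for one host instance:
 * the slot DMA pool, TX/RX rings, received-FIS buffer, command slots and
 * trash buckets, the chip register mapping, any queued delayed work, the
 * tag bitmap and finally the mvs_info itself.
 */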
static void mvs_free(struct mvs_info *mvi)
{
        struct mvs_wq *mwq;
        int slot_nr;

        if (!mvi)
                return;

        if (mvi->flags & MVF_FLAG_SOC)
                slot_nr = MVS_SOC_SLOTS;
        else
                slot_nr = MVS_CHIP_SLOT_SZ;

        dma_pool_destroy(mvi->dma_pool);

        if (mvi->tx)
                dma_free_coherent(mvi->dev,
                                  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
                                  mvi->tx, mvi->tx_dma);
        if (mvi->rx_fis)
                dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
                                  mvi->rx_fis, mvi->rx_fis_dma);
        if (mvi->rx)
                dma_free_coherent(mvi->dev,
                                  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
                                  mvi->rx, mvi->rx_dma);
        if (mvi->slot)
                dma_free_coherent(mvi->dev,
                                  sizeof(*mvi->slot) * slot_nr,
                                  mvi->slot, mvi->slot_dma);

        if (mvi->bulk_buffer)
                dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
                                  mvi->bulk_buffer, mvi->bulk_buffer_dma);
        if (mvi->bulk_buffer1)
                dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
                                  mvi->bulk_buffer1, mvi->bulk_buffer_dma1);

        MVS_CHIP_DISP->chip_iounmap(mvi);
        if (mvi->shost)
                scsi_host_put(mvi->shost);
        list_for_each_entry(mwq, &mvi->wq_list, entry)
                cancel_delayed_work(&mwq->work_q);
        kfree(mvi->tags);
        kfree(mvi);
}

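/*
 * In tasklet mode the hard IRQ handler only latches the interrupt status
 * and defers the per-core ISR work here; chip interrupts are re-enabled
 * once every core has been serviced.
 */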
#ifdef CONFIG_SCSI_MVSAS_TASKLET
static void mvs_tasklet(unsigned long opaque)
{
        u32 stat;
        u16 core_nr, i = 0;

        struct mvs_info *mvi;
        struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

        if (unlikely(!mvi))
                BUG_ON(1);

        stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
        if (!stat)
                goto out;

        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
        }
out:
        MVS_CHIP_DISP->interrupt_enable(mvi);

}
#endif

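/*
 * Shared hard IRQ handler. With CONFIG_SCSI_MVSAS_TASKLET the chip
 * interrupt is masked and the work is deferred to mvs_tasklet(); otherwise
 * every core behind this IRQ is serviced directly here.
 */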
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
        u32 core_nr;
        u32 stat;
        struct mvs_info *mvi;
        struct sas_ha_struct *sha = opaque;
#ifndef CONFIG_SCSI_MVSAS_TASKLET
        u32 i;
#endif

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

        if (unlikely(!mvi))
                return IRQ_NONE;
#ifdef CONFIG_SCSI_MVSAS_TASKLET
        MVS_CHIP_DISP->interrupt_disable(mvi);
#endif

        stat = MVS_CHIP_DISP->isr_status(mvi, irq);
        if (!stat) {
#ifdef CONFIG_SCSI_MVSAS_TASKLET
                MVS_CHIP_DISP->interrupt_enable(mvi);
#endif
                return IRQ_NONE;
        }

#ifdef CONFIG_SCSI_MVSAS_TASKLET
        tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#else
        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                MVS_CHIP_DISP->isr(mvi, irq, stat);
        }
#endif
        return IRQ_HANDLED;
}

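/*
 * mvs_alloc - set up the per-host phy/port/device/tag bookkeeping and
 * allocate the DMA areas: TX/RX rings, received-FIS buffer, command slots,
 * the two trash buckets and the slot DMA pool.
 * Returns 0 on success and 1 on failure; the caller cleans up via mvs_free().
 */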
static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
        int i = 0, slot_nr;
        char pool_name[32];

        if (mvi->flags & MVF_FLAG_SOC)
                slot_nr = MVS_SOC_SLOTS;
        else
                slot_nr = MVS_CHIP_SLOT_SZ;

        spin_lock_init(&mvi->lock);
        for (i = 0; i < mvi->chip->n_phy; i++) {
                mvs_phy_init(mvi, i);
                mvi->port[i].wide_port_phymap = 0;
                mvi->port[i].port_attached = 0;
                INIT_LIST_HEAD(&mvi->port[i].list);
        }
        for (i = 0; i < MVS_MAX_DEVICES; i++) {
                mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
                mvi->devices[i].dev_type = SAS_PHY_UNUSED;
                mvi->devices[i].device_id = i;
                mvi->devices[i].dev_status = MVS_DEV_NORMAL;
        }

        /*
         * alloc and init our DMA areas
         */
        mvi->tx = dma_alloc_coherent(mvi->dev,
                                     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
                                     &mvi->tx_dma, GFP_KERNEL);
        if (!mvi->tx)
                goto err_out;
        memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
        mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
                                         &mvi->rx_fis_dma, GFP_KERNEL);
        if (!mvi->rx_fis)
                goto err_out;
        memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);

        mvi->rx = dma_alloc_coherent(mvi->dev,
                                     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
                                     &mvi->rx_dma, GFP_KERNEL);
        if (!mvi->rx)
                goto err_out;
        memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
        mvi->rx[0] = cpu_to_le32(0xfff);
        mvi->rx_cons = 0xfff;

        mvi->slot = dma_alloc_coherent(mvi->dev,
                                       sizeof(*mvi->slot) * slot_nr,
                                       &mvi->slot_dma, GFP_KERNEL);
        if (!mvi->slot)
                goto err_out;
        memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);

        mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
                                              TRASH_BUCKET_SIZE,
                                              &mvi->bulk_buffer_dma, GFP_KERNEL);
        if (!mvi->bulk_buffer)
                goto err_out;

        mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
                                               TRASH_BUCKET_SIZE,
                                               &mvi->bulk_buffer_dma1, GFP_KERNEL);
        if (!mvi->bulk_buffer1)
                goto err_out;

        sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
        mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev,
                                        MVS_SLOT_BUF_SZ, 16, 0);
        if (!mvi->dma_pool) {
                printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name);
                goto err_out;
        }
        mvi->tags_num = slot_nr;

        /* Initialize tags */
        mvs_tag_init(mvi);
        return 0;
err_out:
        return 1;
}


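/*
 * mvs_ioremap - map the main register BAR and, when bar_ex is not -1, the
 * peripheral register BAR as well. Returns 0 on success, -1 if a resource
 * is missing or a mapping fails.
 */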
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
{
        unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
        struct pci_dev *pdev = mvi->pdev;
        if (bar_ex != -1) {
                /*
                 * ioremap main and peripheral registers
                 */
                res_start = pci_resource_start(pdev, bar_ex);
                res_len = pci_resource_len(pdev, bar_ex);
                if (!res_start || !res_len)
                        goto err_out;

                res_flag_ex = pci_resource_flags(pdev, bar_ex);
                if (res_flag_ex & IORESOURCE_MEM)
                        mvi->regs_ex = ioremap(res_start, res_len);
                else
                        mvi->regs_ex = (void *)res_start;
                if (!mvi->regs_ex)
                        goto err_out;
        }

        res_start = pci_resource_start(pdev, bar);
        res_len = pci_resource_len(pdev, bar);
        if (!res_start || !res_len) {
                iounmap(mvi->regs_ex);
                mvi->regs_ex = NULL;
                goto err_out;
        }

        res_flag = pci_resource_flags(pdev, bar);
        mvi->regs = ioremap(res_start, res_len);

        if (!mvi->regs) {
                if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
                        iounmap(mvi->regs_ex);
                mvi->regs_ex = NULL;
                goto err_out;
        }

        return 0;
err_out:
        return -1;
}

void mvs_iounmap(void __iomem *regs)
{
        iounmap(regs);
}

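/*
 * Allocate and wire up one mvs_info (one controller core): hook it into the
 * shared sas_ha private data, allocate its tag bitmap, map the chip
 * registers and set up its DMA areas.
 */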
static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev,
                                      const struct pci_device_id *ent,
                                      struct Scsi_Host *shost, unsigned int id)
{
        struct mvs_info *mvi = NULL;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        mvi = kzalloc(sizeof(*mvi) +
                      (1L << mvs_chips[ent->driver_data].slot_width) *
                      sizeof(struct mvs_slot_info), GFP_KERNEL);
        if (!mvi)
                return NULL;

        mvi->pdev = pdev;
        mvi->dev = &pdev->dev;
        mvi->chip_id = ent->driver_data;
        mvi->chip = &mvs_chips[mvi->chip_id];
        INIT_LIST_HEAD(&mvi->wq_list);

        ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
        ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;

        mvi->id = id;
        mvi->sas = sha;
        mvi->shost = shost;

        mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ >> 3, GFP_KERNEL);
        if (!mvi->tags)
                goto err_out;

        if (MVS_CHIP_DISP->chip_ioremap(mvi))
                goto err_out;
        if (!mvs_alloc(mvi, shost))
                return mvi;
err_out:
        mvs_free(mvi);
        return NULL;
}

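/* Prefer 64-bit DMA and coherent masks, falling back to 32-bit. */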
static int pci_go_64(struct pci_dev *pdev)
{
        int rc;

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc) {
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
        }

        return rc;
}

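/*
 * First-stage sas_ha setup: allocate the phy/port pointer arrays and the
 * mvs_prv_info private area, and fill in the Scsi_Host limits that do not
 * depend on the individual cores yet.
 */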
static int mvs_prep_sas_ha_init(struct Scsi_Host *shost,
                                const struct mvs_chip_info *chip_info)
{
        int phy_nr, port_nr;
        unsigned short core_nr;
        struct asd_sas_phy **arr_phy;
        struct asd_sas_port **arr_port;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        core_nr = chip_info->n_host;
        phy_nr = core_nr * chip_info->n_phy;
        port_nr = phy_nr;

        memset(sha, 0x00, sizeof(struct sas_ha_struct));
        arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
        arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
        if (!arr_phy || !arr_port)
                goto exit_free;

        sha->sas_phy = arr_phy;
        sha->sas_port = arr_port;
        sha->core.shost = shost;

        sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
        if (!sha->lldd_ha)
                goto exit_free;

        ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;

        shost->transportt = mvs_stt;
        shost->max_id = MVS_MAX_DEVICES;
        shost->max_lun = ~0;
        shost->max_channel = 1;
        shost->max_cmd_len = 16;

        return 0;
exit_free:
        kfree(arr_phy);
        kfree(arr_port);
        return -1;

}

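/*
 * Second-stage sas_ha setup, run after every core has been allocated:
 * point the sas_ha phy/port arrays at the per-core structures and derive
 * the queue depth and scatter-gather limits from the chip data.
 */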
static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
                                 const struct mvs_chip_info *chip_info)
{
        int can_queue, i = 0, j = 0;
        struct mvs_info *mvi = NULL;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

        for (j = 0; j < nr_core; j++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
                for (i = 0; i < chip_info->n_phy; i++) {
                        sha->sas_phy[j * chip_info->n_phy + i] =
                                &mvi->phy[i].sas_phy;
                        sha->sas_port[j * chip_info->n_phy + i] =
                                &mvi->port[i].sas_port;
                }
        }

        sha->sas_ha_name = DRV_NAME;
        sha->dev = mvi->dev;
        sha->lldd_module = THIS_MODULE;
        sha->sas_addr = &mvi->sas_addr[0];

        sha->num_phys = nr_core * chip_info->n_phy;

        if (mvi->flags & MVF_FLAG_SOC)
                can_queue = MVS_SOC_CAN_QUEUE;
        else
                can_queue = MVS_CHIP_SLOT_SZ;

        shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
        shost->can_queue = can_queue;
        mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
        sha->core.shost = mvi->shost;
}

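/*
 * Assign the fixed default SAS address 0x5005043011ab0000 (big-endian) to
 * every phy and copy it into the host SAS address.
 */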
static void mvs_init_sas_add(struct mvs_info *mvi)
{
        u8 i;
        for (i = 0; i < mvi->chip->n_phy; i++) {
                mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
                mvi->phy[i].dev_sas_addr =
                        cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
        }

        memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
}

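/*
 * PCI probe: enable the device, pick a DMA mask, allocate the shared sas_ha
 * and one mvs_info per core, initialise each chip, register the SCSI host
 * and libsas HA, request the shared IRQ and kick off the bus scan.
 */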
static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        unsigned int rc, nhost = 0;
        struct mvs_info *mvi;
        struct mvs_prv_info *mpi;
        irq_handler_t irq_handler = mvs_interrupt;
        struct Scsi_Host *shost = NULL;
        const struct mvs_chip_info *chip;

        dev_printk(KERN_INFO, &pdev->dev,
                   "mvsas: driver version %s\n", DRV_VERSION);
        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out_enable;

        pci_set_master(pdev);

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out_disable;

        rc = pci_go_64(pdev);
        if (rc)
                goto err_out_regions;

        shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
        if (!shost) {
                rc = -ENOMEM;
                goto err_out_regions;
        }

        chip = &mvs_chips[ent->driver_data];
        SHOST_TO_SAS_HA(shost) =
                kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
        if (!SHOST_TO_SAS_HA(shost)) {
                scsi_host_put(shost);
                rc = -ENOMEM;
                goto err_out_regions;
        }

        rc = mvs_prep_sas_ha_init(shost, chip);
        if (rc) {
                scsi_host_put(shost);
                rc = -ENOMEM;
                goto err_out_regions;
        }

        pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));

        do {
                mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
                if (!mvi) {
                        rc = -ENOMEM;
                        goto err_out_regions;
                }

                memset(&mvi->hba_info_param, 0xFF,
                       sizeof(struct hba_info_page));

                mvs_init_sas_add(mvi);

                mvi->instance = nhost;
                rc = MVS_CHIP_DISP->chip_init(mvi);
                if (rc) {
                        mvs_free(mvi);
                        goto err_out_regions;
                }
                nhost++;
        } while (nhost < chip->n_host);
        mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
#ifdef CONFIG_SCSI_MVSAS_TASKLET
        tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
                     (unsigned long)SHOST_TO_SAS_HA(shost));
#endif

        mvs_post_sas_ha_init(shost, chip);

        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_shost;

        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
        if (rc)
                goto err_out_shost;
        rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
                         DRV_NAME, SHOST_TO_SAS_HA(shost));
        if (rc)
                goto err_not_sas;

        MVS_CHIP_DISP->interrupt_enable(mvi);

        scsi_scan_host(mvi->shost);

        return 0;

err_not_sas:
        sas_unregister_ha(SHOST_TO_SAS_HA(shost));
err_out_shost:
        scsi_remove_host(mvi->shost);
err_out_regions:
        pci_release_regions(pdev);
err_out_disable:
        pci_disable_device(pdev);
err_out_enable:
        return rc;
}

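/*
 * PCI remove: undo mvs_pci_init() - kill the tasklet, unregister from
 * libsas and SCSI, mask and free the IRQ, free every core and the shared
 * sas_ha, then release the PCI resources.
 */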
static void mvs_pci_remove(struct pci_dev *pdev)
{
        unsigned short core_nr, i = 0;
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct mvs_info *mvi = NULL;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

#ifdef CONFIG_SCSI_MVSAS_TASKLET
        tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#endif

        sas_unregister_ha(sha);
        sas_remove_host(mvi->shost);

        MVS_CHIP_DISP->interrupt_disable(mvi);
        free_irq(mvi->pdev->irq, sha);
        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                mvs_free(mvi);
        }
        kfree(sha->sas_phy);
        kfree(sha->sas_port);
        kfree(sha);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        return;
}

static struct pci_device_id mvs_pci_table[] = {
        { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
        { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
        {
                .vendor         = PCI_VENDOR_ID_MARVELL,
                .device         = 0x6440,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = 0x6480,
                .class          = 0,
                .class_mask     = 0,
                .driver_data    = chip_6485,
        },
        { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
        { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
        { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
        { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
        { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
        { PCI_VDEVICE(TTI, 0x2710), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2720), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2721), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2722), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2740), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2744), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2760), chip_9480 },
        {
                .vendor         = PCI_VENDOR_ID_MARVELL_EXT,
                .device         = 0x9480,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = 0x9480,
                .class          = 0,
                .class_mask     = 0,
                .driver_data    = chip_9480,
        },
        {
                .vendor         = PCI_VENDOR_ID_MARVELL_EXT,
                .device         = 0x9445,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = 0x9480,
                .class          = 0,
                .class_mask     = 0,
                .driver_data    = chip_9445,
        },
        { PCI_VDEVICE(MARVELL_EXT, 0x9485), chip_9485 }, /* Marvell 9480/9485 (any vendor/model) */
        { PCI_VDEVICE(OCZ, 0x1021), chip_9485 }, /* OCZ RevoDrive3 */
        { PCI_VDEVICE(OCZ, 0x1022), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1040), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1041), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1042), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1043), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1044), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1080), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1083), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1084), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */

        { }     /* terminate list */
};

static struct pci_driver mvs_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = mvs_pci_table,
        .probe          = mvs_pci_init,
        .remove         = mvs_pci_remove,
};

static ssize_t
mvs_show_driver_version(struct device *cdev,
                        struct device_attribute *attr, char *buffer)
{
        return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
}

static DEVICE_ATTR(driver_version,
                   S_IRUGO,
                   mvs_show_driver_version,
                   NULL);

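/*
 * sysfs store handler for interrupt_coalescing: accept a timer value in
 * microseconds (below 0x10000) and push it to every core through the
 * chip dispatch's tune_interrupt hook.
 */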
static ssize_t
mvs_store_interrupt_coalescing(struct device *cdev,
                               struct device_attribute *attr,
                               const char *buffer, size_t size)
{
        unsigned int val = 0;
        struct mvs_info *mvi = NULL;
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        u8 i, core_nr;
        if (buffer == NULL)
                return size;

        if (sscanf(buffer, "%u", &val) != 1)
                return -EINVAL;

        if (val >= 0x10000) {
                mv_dprintk("interrupt coalescing timer %d us is too long\n",
                           val);
                return strlen(buffer);
        }

        interrupt_coalescing = val;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

        if (unlikely(!mvi))
                return -EINVAL;

        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                if (MVS_CHIP_DISP->tune_interrupt)
                        MVS_CHIP_DISP->tune_interrupt(mvi,
                                                      interrupt_coalescing);
        }
        mv_dprintk("set interrupt coalescing time to %d us\n",
                   interrupt_coalescing);
        return strlen(buffer);
}

static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
                                             struct device_attribute *attr,
                                             char *buffer)
{
        return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
}

static DEVICE_ATTR(interrupt_coalescing,
                   S_IRUGO|S_IWUSR,
                   mvs_show_interrupt_coalescing,
                   mvs_store_interrupt_coalescing);

/* task handler */
struct task_struct *mvs_th;
static int __init mvs_init(void)
{
        int rc;
        mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
        if (!mvs_stt)
                return -ENOMEM;

        rc = pci_register_driver(&mvs_pci_driver);
        if (rc)
                goto err_out;

        return 0;

err_out:
        sas_release_transport(mvs_stt);
        return rc;
}

static void __exit mvs_exit(void)
{
        pci_unregister_driver(&mvs_pci_driver);
        sas_release_transport(mvs_stt);
}

struct device_attribute *mvst_host_attrs[] = {
        &dev_attr_driver_version,
        &dev_attr_interrupt_coalescing,
        NULL,
};

module_init(mvs_init);
module_exit(mvs_exit);

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
#ifdef CONFIG_PCI
MODULE_DEVICE_TABLE(pci, mvs_pci_table);
#endif