1/*
2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41#include <linux/slab.h>
42#include "pm8001_sas.h"
43#include "pm8001_chips.h"
44
45static struct scsi_transport_template *pm8001_stt;
46
47/**
48 * Chip info structure identifying key chip capabilities: whether encryption
49 * is available, the number of PHYs, and the chip-specific dispatch table.
50 */
51static const struct pm8001_chip_info pm8001_chips[] = {
52 [chip_8001] = {0, 8, &pm8001_8001_dispatch,},
53 [chip_8008] = {0, 8, &pm8001_80xx_dispatch,},
54 [chip_8009] = {1, 8, &pm8001_80xx_dispatch,},
55 [chip_8018] = {0, 16, &pm8001_80xx_dispatch,},
56 [chip_8019] = {1, 16, &pm8001_80xx_dispatch,},
57 [chip_8074] = {0, 8, &pm8001_80xx_dispatch,},
58 [chip_8076] = {0, 16, &pm8001_80xx_dispatch,},
59 [chip_8077] = {0, 16, &pm8001_80xx_dispatch,},
60 [chip_8006] = {0, 16, &pm8001_80xx_dispatch,},
61 [chip_8070] = {0, 8, &pm8001_80xx_dispatch,},
62 [chip_8072] = {0, 16, &pm8001_80xx_dispatch,},
63};
64static int pm8001_id;
65
66LIST_HEAD(hba_list);
67
68struct workqueue_struct *pm8001_wq;
69
70/**
71 * The main scsi_host_template which the LLDD registers with the SCSI core.
72 */
73static struct scsi_host_template pm8001_sht = {
74 .module = THIS_MODULE,
75 .name = DRV_NAME,
76 .queuecommand = sas_queuecommand,
77 .target_alloc = sas_target_alloc,
78 .slave_configure = sas_slave_configure,
79 .scan_finished = pm8001_scan_finished,
80 .scan_start = pm8001_scan_start,
81 .change_queue_depth = sas_change_queue_depth,
82 .bios_param = sas_bios_param,
83 .can_queue = 1,
84 .this_id = -1,
85 .sg_tablesize = SG_ALL,
86 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
 87 .eh_device_reset_handler = sas_eh_device_reset_handler,
 88 .eh_target_reset_handler = sas_eh_target_reset_handler,
 89 .slave_alloc = sas_slave_alloc,
 90 .target_destroy = sas_target_destroy,
91 .ioctl = sas_ioctl,
92 .shost_attrs = pm8001_host_attrs,
93 .track_queue_depth = 1,
94};
95
96/**
97 * The SAS layer calls these handlers to execute specific tasks.
98 */
99static struct sas_domain_function_template pm8001_transport_ops = {
100 .lldd_dev_found = pm8001_dev_found,
101 .lldd_dev_gone = pm8001_dev_gone,
102
103 .lldd_execute_task = pm8001_queue_command,
104 .lldd_control_phy = pm8001_phy_control,
105
106 .lldd_abort_task = pm8001_abort_task,
107 .lldd_abort_task_set = pm8001_abort_task_set,
108 .lldd_clear_aca = pm8001_clear_aca,
109 .lldd_clear_task_set = pm8001_clear_task_set,
110 .lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset,
111 .lldd_lu_reset = pm8001_lu_reset,
112 .lldd_query_task = pm8001_query_task,
113};
114
115/**
116 * pm8001_phy_init - initialize one of the adapter's PHYs
117 * @pm8001_ha: our hba structure.
118 * @phy_id: phy id.
119 */
120static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
121{
122 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
123 struct asd_sas_phy *sas_phy = &phy->sas_phy;
 124 phy->phy_state = PHY_LINK_DISABLE;
 125 phy->pm8001_ha = pm8001_ha;
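	/* Only PHYs that actually exist on this chip are marked enabled */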
126 sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
127 sas_phy->class = SAS;
128 sas_phy->iproto = SAS_PROTOCOL_ALL;
129 sas_phy->tproto = 0;
130 sas_phy->type = PHY_TYPE_PHYSICAL;
131 sas_phy->role = PHY_ROLE_INITIATOR;
132 sas_phy->oob_mode = OOB_NOT_CONNECTED;
133 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
134 sas_phy->id = phy_id;
135 sas_phy->sas_addr = (u8 *)&phy->dev_sas_addr;
136 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
137 sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
138 sas_phy->lldd_phy = phy;
139}
140
141/**
142 * pm8001_free - free hba
143 * @pm8001_ha: our hba structure.
144 *
145 */
146static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
147{
148 int i;
149
150 if (!pm8001_ha)
151 return;
152
153 for (i = 0; i < USI_MAX_MEMCNT; i++) {
154 if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
 155 dma_free_coherent(&pm8001_ha->pdev->dev,
 156 (pm8001_ha->memoryMap.region[i].total_len +
157 pm8001_ha->memoryMap.region[i].alignment),
158 pm8001_ha->memoryMap.region[i].virt_ptr,
159 pm8001_ha->memoryMap.region[i].phys_addr);
160 }
161 }
162 PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
163 flush_workqueue(pm8001_wq);
164 kfree(pm8001_ha->tags);
165 kfree(pm8001_ha);
166}
167
168#ifdef PM8001_USE_TASKLET
169
170/**
171 * pm8001_tasklet - bottom-half tasklet for the (up to 64) MSI-X interrupt vectors
172 * @opaque: the passed isr_param; the host adapter struct is retrieved from it
173 * Note: pm8001_tasklet is common for pm8001 & pm80xx
174 */
175static void pm8001_tasklet(unsigned long opaque)
176{
177 struct pm8001_hba_info *pm8001_ha;
178 struct isr_param *irq_vector;
179
180 irq_vector = (struct isr_param *)opaque;
181 pm8001_ha = irq_vector->drv_inst;
182 if (unlikely(!pm8001_ha))
183 BUG_ON(1);
184 PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
185}
186#endif
187
188/**
189 * pm8001_interrupt_handler_msix - main MSI-X interrupt handler.
190 * It obtains the vector number and either schedules the corresponding
191 * tasklet (bottom half) or services the interrupt directly.
192 * @irq: interrupt number.
193 * @opaque: the passed outbound queue/vector; the host structure is retrieved from it.
194 */
195static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
196{
197 struct isr_param *irq_vector;
198 struct pm8001_hba_info *pm8001_ha;
199 irqreturn_t ret = IRQ_HANDLED;
200 irq_vector = (struct isr_param *)opaque;
201 pm8001_ha = irq_vector->drv_inst;
202
203 if (unlikely(!pm8001_ha))
204 return IRQ_NONE;
 205 if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
 206 return IRQ_NONE;
207#ifdef PM8001_USE_TASKLET
208 tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);
209#else
210 ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
211#endif
212 return ret;
213}
214
215/**
216 * pm8001_interrupt_handler_intx - main INTx interrupt handler.
217 * @dev_id: sas_ha structure; the HBA is retrieved from it.
218 */
219
220static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
221{
222 struct pm8001_hba_info *pm8001_ha;
223 irqreturn_t ret = IRQ_HANDLED;
224 struct sas_ha_struct *sha = dev_id;
225 pm8001_ha = sha->lldd_ha;
226 if (unlikely(!pm8001_ha))
227 return IRQ_NONE;
 228 if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
 229 return IRQ_NONE;
230
231#ifdef PM8001_USE_TASKLET
232 tasklet_schedule(&pm8001_ha->tasklet[0]);
233#else
234 ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
235#endif
236 return ret;
237}
238
239/**
240 * pm8001_alloc - initialize our hba structure and its DMA memory regions.
241 * @pm8001_ha: our hba structure.
242 * @ent: PCI device ID entry, used to distinguish SPC from SPCv/SPCve controllers.
243 */
244static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
245 const struct pci_device_id *ent)
246{
247 int i;
248 spin_lock_init(&pm8001_ha->lock);
249 spin_lock_init(&pm8001_ha->bitmap_lock);
250 PM8001_INIT_DBG(pm8001_ha,
251 pm8001_printk("pm8001_alloc: PHY:%x\n",
252 pm8001_ha->chip->n_phy));
253 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
254 pm8001_phy_init(pm8001_ha, i);
255 pm8001_ha->port[i].wide_port_phymap = 0;
256 pm8001_ha->port[i].port_attached = 0;
257 pm8001_ha->port[i].port_state = 0;
258 INIT_LIST_HEAD(&pm8001_ha->port[i].list);
259 }
260
261 pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
262 if (!pm8001_ha->tags)
263 goto err_out;
264 /* MPI Memory region 1 for AAP Event Log for fw */
265 pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
266 pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
267 pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE;
268 pm8001_ha->memoryMap.region[AAP1].alignment = 32;
269
270 /* MPI Memory region 2 for IOP Event Log for fw */
271 pm8001_ha->memoryMap.region[IOP].num_elements = 1;
272 pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE;
273 pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
274 pm8001_ha->memoryMap.region[IOP].alignment = 32;
275
276 for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
277 /* MPI Memory region 3 for consumer Index of inbound queues */
278 pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
279 pm8001_ha->memoryMap.region[CI+i].element_size = 4;
280 pm8001_ha->memoryMap.region[CI+i].total_len = 4;
281 pm8001_ha->memoryMap.region[CI+i].alignment = 4;
282
283 if ((ent->driver_data) != chip_8001) {
284 /* MPI Memory region 5 inbound queues */
285 pm8001_ha->memoryMap.region[IB+i].num_elements =
286 PM8001_MPI_QUEUE;
287 pm8001_ha->memoryMap.region[IB+i].element_size = 128;
288 pm8001_ha->memoryMap.region[IB+i].total_len =
289 PM8001_MPI_QUEUE * 128;
290 pm8001_ha->memoryMap.region[IB+i].alignment = 128;
291 } else {
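			/* SPC (chip_8001) uses 64-byte inbound IOMBs */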
292 pm8001_ha->memoryMap.region[IB+i].num_elements =
293 PM8001_MPI_QUEUE;
294 pm8001_ha->memoryMap.region[IB+i].element_size = 64;
295 pm8001_ha->memoryMap.region[IB+i].total_len =
296 PM8001_MPI_QUEUE * 64;
297 pm8001_ha->memoryMap.region[IB+i].alignment = 64;
298 }
299 }
300
301 for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
302 /* MPI Memory region 4 for producer Index of outbound queues */
303 pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
304 pm8001_ha->memoryMap.region[PI+i].element_size = 4;
305 pm8001_ha->memoryMap.region[PI+i].total_len = 4;
306 pm8001_ha->memoryMap.region[PI+i].alignment = 4;
307
308 if (ent->driver_data != chip_8001) {
309 /* MPI Memory region 6 Outbound queues */
310 pm8001_ha->memoryMap.region[OB+i].num_elements =
311 PM8001_MPI_QUEUE;
312 pm8001_ha->memoryMap.region[OB+i].element_size = 128;
313 pm8001_ha->memoryMap.region[OB+i].total_len =
314 PM8001_MPI_QUEUE * 128;
315 pm8001_ha->memoryMap.region[OB+i].alignment = 128;
316 } else {
317 /* MPI Memory region 6 Outbound queues */
318 pm8001_ha->memoryMap.region[OB+i].num_elements =
319 PM8001_MPI_QUEUE;
320 pm8001_ha->memoryMap.region[OB+i].element_size = 64;
321 pm8001_ha->memoryMap.region[OB+i].total_len =
322 PM8001_MPI_QUEUE * 64;
323 pm8001_ha->memoryMap.region[OB+i].alignment = 64;
324 }
325
326 }
327 /* Memory region write DMA*/
328 pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
329 pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
330 pm8001_ha->memoryMap.region[NVMD].total_len = 4096;
331 /* Memory region for devices*/
332 pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
333 pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
334 sizeof(struct pm8001_device);
335 pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
336 sizeof(struct pm8001_device);
337
338 /* Memory region for ccb_info*/
339 pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
340 pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
341 sizeof(struct pm8001_ccb_info);
342 pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
343 sizeof(struct pm8001_ccb_info);
344
345 /* Memory region for fw flash */
346 pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
347
348 pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1;
349 pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000;
350 pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
351 pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
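	/* Back every region described above with DMA-coherent memory */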
352 for (i = 0; i < USI_MAX_MEMCNT; i++) {
353 if (pm8001_mem_alloc(pm8001_ha->pdev,
354 &pm8001_ha->memoryMap.region[i].virt_ptr,
355 &pm8001_ha->memoryMap.region[i].phys_addr,
356 &pm8001_ha->memoryMap.region[i].phys_addr_hi,
357 &pm8001_ha->memoryMap.region[i].phys_addr_lo,
358 pm8001_ha->memoryMap.region[i].total_len,
359 pm8001_ha->memoryMap.region[i].alignment) != 0) {
360 PM8001_FAIL_DBG(pm8001_ha,
361 pm8001_printk("Mem%d alloc failed\n",
362 i));
363 goto err_out;
364 }
365 }
366
367 pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
368 for (i = 0; i < PM8001_MAX_DEVICES; i++) {
369 pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
370 pm8001_ha->devices[i].id = i;
371 pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
372 pm8001_ha->devices[i].running_req = 0;
373 }
374 pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
375 for (i = 0; i < PM8001_MAX_CCB; i++) {
376 pm8001_ha->ccb_info[i].ccb_dma_handle =
377 pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
378 i * sizeof(struct pm8001_ccb_info);
379 pm8001_ha->ccb_info[i].task = NULL;
380 pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
381 pm8001_ha->ccb_info[i].device = NULL;
382 ++pm8001_ha->tags_num;
383 }
384 pm8001_ha->flags = PM8001F_INIT_TIME;
385 /* Initialize tags */
386 pm8001_tag_init(pm8001_ha);
387 return 0;
388err_out:
389 return 1;
390}
391
392/**
393 * pm8001_ioremap - remap the PCI BAR physical addresses to kernel virtual
394 * addresses so that we can access them.
395 * @pm8001_ha: our hba structure.
396 */
397static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
398{
399 u32 bar;
400 u32 logicalBar = 0;
401 struct pci_dev *pdev;
402
403 pdev = pm8001_ha->pdev;
404 /* map pci mem (PMC pci base 0-3)*/
405 for (bar = 0; bar < 6; bar++) {
406 /*
407 ** logical BARs for SPC:
408 ** bar 0 and 1 - logical BAR0
409 ** bar 2 and 3 - logical BAR1
410 ** bar4 - logical BAR2
411 ** bar5 - logical BAR3
412 ** Skip the appropriate assignments:
413 */
414 if ((bar == 1) || (bar == 3))
415 continue;
416 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
417 pm8001_ha->io_mem[logicalBar].membase =
418 pci_resource_start(pdev, bar);
419 pm8001_ha->io_mem[logicalBar].memsize =
420 pci_resource_len(pdev, bar);
421 pm8001_ha->io_mem[logicalBar].memvirtaddr =
422 ioremap(pm8001_ha->io_mem[logicalBar].membase,
423 pm8001_ha->io_mem[logicalBar].memsize);
424 PM8001_INIT_DBG(pm8001_ha,
425 pm8001_printk("PCI: bar %d, logicalBar %d ",
426 bar, logicalBar));
427 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
428 "base addr %llx virt_addr=%llx len=%d\n",
429 (u64)pm8001_ha->io_mem[logicalBar].membase,
430 (u64)(unsigned long)
431 pm8001_ha->io_mem[logicalBar].memvirtaddr,
432 pm8001_ha->io_mem[logicalBar].memsize));
433 } else {
434 pm8001_ha->io_mem[logicalBar].membase = 0;
435 pm8001_ha->io_mem[logicalBar].memsize = 0;
436 pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
437 }
438 logicalBar++;
439 }
440 return 0;
441}
442
443/**
444 * pm8001_pci_alloc - initialize our ha card structure
445 * @pdev: pci device.
446 * @ent: pci device id entry.
447 * @shost: scsi host struct which has been initialized before.
448 */
449static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
450 const struct pci_device_id *ent,
451 struct Scsi_Host *shost)
452
453{
454 struct pm8001_hba_info *pm8001_ha;
455 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
456 int j;
457
458 pm8001_ha = sha->lldd_ha;
459 if (!pm8001_ha)
460 return NULL;
461
462 pm8001_ha->pdev = pdev;
463 pm8001_ha->dev = &pdev->dev;
464 pm8001_ha->chip_id = ent->driver_data;
465 pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
466 pm8001_ha->irq = pdev->irq;
467 pm8001_ha->sas = sha;
468 pm8001_ha->shost = shost;
469 pm8001_ha->id = pm8001_id++;
470 pm8001_ha->logging_level = 0x01;
471 sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
472 /* IOMB size is 128 for 8088/89 controllers */
473 if (pm8001_ha->chip_id != chip_8001)
474 pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
475 else
476 pm8001_ha->iomb_size = IOMB_SIZE_SPC;
477
478#ifdef PM8001_USE_TASKLET
479 /* Tasklets: a single one for INT-X/SPC, or one per MSI-X vector otherwise */
480 if ((!pdev->msix_cap || !pci_msi_enabled())
481 || (pm8001_ha->chip_id == chip_8001))
482 tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
483 (unsigned long)&(pm8001_ha->irq_vector[0]));
484 else
485 for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
486 tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
487 (unsigned long)&(pm8001_ha->irq_vector[j]));
488#endif
489 pm8001_ioremap(pm8001_ha);
490 if (!pm8001_alloc(pm8001_ha, ent))
491 return pm8001_ha;
492 pm8001_free(pm8001_ha);
493 return NULL;
494}
495
496/**
497 * pci_go_44 - the PM8001 chip supports 44-bit rather than 64-bit DMA addressing, so set a 44-bit mask and fall back to 32 bits.
498 * @pdev: pci device.
499 */
500static int pci_go_44(struct pci_dev *pdev)
501{
502 int rc;
503
 504 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
505 if (rc) {
506 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
507 if (rc)
 508 dev_printk(KERN_ERR, &pdev->dev,
509 "32-bit DMA enable failed\n");
 510 }
511 return rc;
512}
513
514/**
515 * pm8001_prep_sas_ha_init - allocate memory for the general hba struct and initialize it.
516 * @shost: scsi host which has been allocated outside.
517 * @chip_info: our chip info structure.
518 */
519static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
520 const struct pm8001_chip_info *chip_info)
521{
522 int phy_nr, port_nr;
523 struct asd_sas_phy **arr_phy;
524 struct asd_sas_port **arr_port;
525 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
526
527 phy_nr = chip_info->n_phy;
528 port_nr = phy_nr;
529 memset(sha, 0x00, sizeof(*sha));
530 arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
531 if (!arr_phy)
532 goto exit;
533 arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
534 if (!arr_port)
535 goto exit_free2;
536
537 sha->sas_phy = arr_phy;
538 sha->sas_port = arr_port;
539 sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL);
540 if (!sha->lldd_ha)
541 goto exit_free1;
542
543 shost->transportt = pm8001_stt;
544 shost->max_id = PM8001_MAX_DEVICES;
545 shost->max_lun = 8;
546 shost->max_channel = 0;
547 shost->unique_id = pm8001_id;
548 shost->max_cmd_len = 16;
549 shost->can_queue = PM8001_CAN_QUEUE;
550 shost->cmd_per_lun = 32;
551 return 0;
552exit_free1:
553 kfree(arr_port);
554exit_free2:
555 kfree(arr_phy);
556exit:
557 return -1;
558}
559
560/**
561 * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas
562 * @shost: scsi host which has been allocated outside
563 * @chip_info: our chip info structure.
564 */
565static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
566 const struct pm8001_chip_info *chip_info)
567{
568 int i = 0;
569 struct pm8001_hba_info *pm8001_ha;
570 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
571
572 pm8001_ha = sha->lldd_ha;
573 for (i = 0; i < chip_info->n_phy; i++) {
574 sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
575 sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
576 sha->sas_phy[i]->sas_addr =
577 (u8 *)&pm8001_ha->phy[i].dev_sas_addr;
578 }
579 sha->sas_ha_name = DRV_NAME;
580 sha->dev = pm8001_ha->dev;
581 sha->strict_wide_ports = 1;
582 sha->lldd_module = THIS_MODULE;
583 sha->sas_addr = &pm8001_ha->sas_addr[0];
584 sha->num_phys = chip_info->n_phy;
585 sha->core.shost = shost;
586}
587
588/**
589 * pm8001_init_sas_add - initialize sas address
590 * @pm8001_ha: our hba structure.
591 *
592 * Currently we just set a fixed SAS address for our HBA; for production it
593 * should be read from the EEPROM.
594 */
595static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
596{
597 u8 i, j;
598 u8 sas_add[8];
599#ifdef PM8001_READ_VPD
600 /* For new SPC controllers WWN is stored in flash vpd
601 * For SPC/SPCve controllers WWN is stored in EEPROM
602 * For Older SPC WWN is stored in NVMD
603 */
604 DECLARE_COMPLETION_ONSTACK(completion);
605 struct pm8001_ioctl_payload payload;
606 u16 deviceid;
607 int rc;
608
609 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
610 pm8001_ha->nvmd_completion = &completion;
611
612 if (pm8001_ha->chip_id == chip_8001) {
613 if (deviceid == 0x8081 || deviceid == 0x0042) {
614 payload.minor_function = 4;
615 payload.length = 4096;
616 } else {
617 payload.minor_function = 0;
618 payload.length = 128;
619 }
620 } else if ((pm8001_ha->chip_id == chip_8070 ||
621 pm8001_ha->chip_id == chip_8072) &&
622 pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) {
623 payload.minor_function = 4;
624 payload.length = 4096;
625 } else {
626 payload.minor_function = 1;
627 payload.length = 4096;
628 }
629 payload.offset = 0;
630 payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
631 if (!payload.func_specific) {
632 PM8001_INIT_DBG(pm8001_ha, pm8001_printk("mem alloc fail\n"));
633 return;
634 }
635 rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
636 if (rc) {
637 kfree(payload.func_specific);
638 PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n"));
639 return;
640 }
641 wait_for_completion(&completion);
642
643 for (i = 0, j = 0; i <= 7; i++, j++) {
644 if (pm8001_ha->chip_id == chip_8001) {
645 if (deviceid == 0x8081)
646 pm8001_ha->sas_addr[j] =
647 payload.func_specific[0x704 + i];
648 else if (deviceid == 0x0042)
649 pm8001_ha->sas_addr[j] =
650 payload.func_specific[0x010 + i];
651 } else if ((pm8001_ha->chip_id == chip_8070 ||
652 pm8001_ha->chip_id == chip_8072) &&
653 pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) {
654 pm8001_ha->sas_addr[j] =
655 payload.func_specific[0x010 + i];
656 } else
657 pm8001_ha->sas_addr[j] =
658 payload.func_specific[0x804 + i];
659 }
660 memcpy(sas_add, pm8001_ha->sas_addr, SAS_ADDR_SIZE);
661 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
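		/* PHYs are grouped in fours; the low address byte is bumped for
		 * each group so every group of four PHYs gets its own SAS address */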
662 if (i && ((i % 4) == 0))
663 sas_add[7] = sas_add[7] + 4;
664 memcpy(&pm8001_ha->phy[i].dev_sas_addr,
665 sas_add, SAS_ADDR_SIZE);
666 PM8001_INIT_DBG(pm8001_ha,
667 pm8001_printk("phy %d sas_addr = %016llx\n", i,
668 pm8001_ha->phy[i].dev_sas_addr));
669 }
670 kfree(payload.func_specific);
671#else
672 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
673 pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
674 pm8001_ha->phy[i].dev_sas_addr =
675 cpu_to_be64((u64)
676 (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
677 }
678 memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
679 SAS_ADDR_SIZE);
680#endif
681}
682
683/*
684 * pm8001_get_phy_settings_info : Read phy setting values.
685 * @pm8001_ha : our hba.
686 */
687static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
688{
689
690#ifdef PM8001_READ_VPD
691 /*OPTION ROM FLASH read for the SPC cards */
692 DECLARE_COMPLETION_ONSTACK(completion);
693 struct pm8001_ioctl_payload payload;
694 int rc;
695
696 pm8001_ha->nvmd_completion = &completion;
697 /* SAS ADDRESS read from flash / EEPROM */
698 payload.minor_function = 6;
699 payload.offset = 0;
700 payload.length = 4096;
701 payload.func_specific = kzalloc(4096, GFP_KERNEL);
702 if (!payload.func_specific)
703 return -ENOMEM;
704 /* Read phy setting values from flash */
705 rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
706 if (rc) {
707 kfree(payload.func_specific);
708 PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n"));
709 return -ENOMEM;
710 }
711 wait_for_completion(&completion);
712 pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
713 kfree(payload.func_specific);
714#endif
715 return 0;
716}
717
718struct pm8001_mpi3_phy_pg_trx_config {
719 u32 LaneLosCfg;
720 u32 LanePgaCfg1;
721 u32 LanePisoCfg1;
722 u32 LanePisoCfg2;
723 u32 LanePisoCfg3;
724 u32 LanePisoCfg4;
725 u32 LanePisoCfg5;
726 u32 LanePisoCfg6;
727 u32 LaneBctCtrl;
728};
729
730/**
731 * pm8001_get_internal_phy_settings : Retrieves the internal PHY settings
732 * @pm8001_ha : our adapter
733 * @phycfg : PHY config page to populate
734 */
735static
736void pm8001_get_internal_phy_settings(struct pm8001_hba_info *pm8001_ha,
737 struct pm8001_mpi3_phy_pg_trx_config *phycfg)
738{
739 phycfg->LaneLosCfg = 0x00000132;
740 phycfg->LanePgaCfg1 = 0x00203949;
741 phycfg->LanePisoCfg1 = 0x000000FF;
742 phycfg->LanePisoCfg2 = 0xFF000001;
743 phycfg->LanePisoCfg3 = 0xE7011300;
744 phycfg->LanePisoCfg4 = 0x631C40C0;
745 phycfg->LanePisoCfg5 = 0xF8102036;
746 phycfg->LanePisoCfg6 = 0xF74A1000;
747 phycfg->LaneBctCtrl = 0x00FB33F8;
748}
749
750/**
751 * pm8001_get_external_phy_settings : Retrieves the external PHY settings
752 * @pm8001_ha : our adapter
753 * @phycfg : PHY config page to populate
754 */
755static
756void pm8001_get_external_phy_settings(struct pm8001_hba_info *pm8001_ha,
757 struct pm8001_mpi3_phy_pg_trx_config *phycfg)
758{
759 phycfg->LaneLosCfg = 0x00000132;
760 phycfg->LanePgaCfg1 = 0x00203949;
761 phycfg->LanePisoCfg1 = 0x000000FF;
762 phycfg->LanePisoCfg2 = 0xFF000001;
763 phycfg->LanePisoCfg3 = 0xE7011300;
764 phycfg->LanePisoCfg4 = 0x63349140;
765 phycfg->LanePisoCfg5 = 0xF8102036;
766 phycfg->LanePisoCfg6 = 0xF80D9300;
767 phycfg->LaneBctCtrl = 0x00FB33F8;
768}
769
770/**
771 * pm8001_get_phy_mask : Retrieves the mask that denotes whether each PHY is internal or external
772 * @pm8001_ha : our adapter
773 * @phymask : The PHY mask
774 */
775static
776void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask)
777{
778 switch (pm8001_ha->pdev->subsystem_device) {
779 case 0x0070: /* H1280 - 8 external 0 internal */
780 case 0x0072: /* H12F0 - 16 external 0 internal */
781 *phymask = 0x0000;
782 break;
783
784 case 0x0071: /* H1208 - 0 external 8 internal */
785 case 0x0073: /* H120F - 0 external 16 internal */
786 *phymask = 0xFFFF;
787 break;
788
789 case 0x0080: /* H1244 - 4 external 4 internal */
790 *phymask = 0x00F0;
791 break;
792
793 case 0x0081: /* H1248 - 4 external 8 internal */
794 *phymask = 0x0FF0;
795 break;
796
797 case 0x0082: /* H1288 - 8 external 8 internal */
798 *phymask = 0xFF00;
799 break;
800
801 default:
802 PM8001_INIT_DBG(pm8001_ha,
803 pm8001_printk("Unknown subsystem device=0x%.04x",
804 pm8001_ha->pdev->subsystem_device));
805 }
806}
807
808/**
809 * pm8001_set_phy_settings_ven_117c_12G : Configure ATTO 12Gb PHY settings
810 * @pm8001_ha : our adapter
811 */
812static
813int pm8001_set_phy_settings_ven_117c_12G(struct pm8001_hba_info *pm8001_ha)
814{
815 struct pm8001_mpi3_phy_pg_trx_config phycfg_int;
816 struct pm8001_mpi3_phy_pg_trx_config phycfg_ext;
817 int phymask = 0;
818 int i = 0;
819
820 memset(&phycfg_int, 0, sizeof(phycfg_int));
821 memset(&phycfg_ext, 0, sizeof(phycfg_ext));
822
823 pm8001_get_internal_phy_settings(pm8001_ha, &phycfg_int);
824 pm8001_get_external_phy_settings(pm8001_ha, &phycfg_ext);
825 pm8001_get_phy_mask(pm8001_ha, &phymask);
826
827 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
828 if (phymask & (1 << i)) {/* Internal PHY */
829 pm8001_set_phy_profile_single(pm8001_ha, i,
830 sizeof(phycfg_int) / sizeof(u32),
831 (u32 *)&phycfg_int);
832
833 } else { /* External PHY */
834 pm8001_set_phy_profile_single(pm8001_ha, i,
835 sizeof(phycfg_ext) / sizeof(u32),
836 (u32 *)&phycfg_ext);
837 }
838 }
839
840 return 0;
841}
842
843/**
844 * pm8001_configure_phy_settings : Configures PHY settings based on vendor ID.
845 * @pm8001_ha : our hba.
846 */
847static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
848{
849 switch (pm8001_ha->pdev->subsystem_vendor) {
850 case PCI_VENDOR_ID_ATTO:
851 if (pm8001_ha->pdev->device == 0x0042) /* 6Gb */
852 return 0;
853 else
854 return pm8001_set_phy_settings_ven_117c_12G(pm8001_ha);
855
856 case PCI_VENDOR_ID_ADAPTEC2:
857 case 0:
858 return 0;
859
860 default:
861 return pm8001_get_phy_settings_info(pm8001_ha);
862 }
863}
864
865#ifdef PM8001_USE_MSIX
866/**
867 * pm8001_setup_msix - enable MSI-X interrupt
868 * @pm8001_ha: our hba structure.
869 *
870 */
871static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
872{
873 u32 i = 0, j = 0;
874 u32 number_of_intr;
875 int flag = 0;
876 int rc;
877 static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
878
879 /* SPCv controllers support 64 MSI-X vectors */
880 if (pm8001_ha->chip_id == chip_8001) {
881 number_of_intr = 1;
882 } else {
883 number_of_intr = PM8001_MAX_MSIX_VEC;
884 flag &= ~IRQF_SHARED;
885 }
886
887 rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
888 number_of_intr, PCI_IRQ_MSIX);
889 if (rc < 0)
890 return rc;
891 pm8001_ha->number_of_intr = number_of_intr;
892
893 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
894 "pci_alloc_irq_vectors request ret:%d no of intr %d\n",
895 rc, pm8001_ha->number_of_intr));
896
897 for (i = 0; i < number_of_intr; i++) {
898 snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
899 DRV_NAME"%d", i);
900 pm8001_ha->irq_vector[i].irq_id = i;
901 pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
902
903 rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i),
904 pm8001_interrupt_handler_msix, flag,
905 intr_drvname[i], &(pm8001_ha->irq_vector[i]));
906 if (rc) {
907 for (j = 0; j < i; j++) {
908 free_irq(pci_irq_vector(pm8001_ha->pdev, j),
909 &(pm8001_ha->irq_vector[j]));
910 }
911 pci_free_irq_vectors(pm8001_ha->pdev);
912 break;
913 }
914 }
915
916 return rc;
917}
918#endif
919
920/**
921 * pm8001_request_irq - register interrupt
922 * @pm8001_ha: our hba structure.
923 */
924static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
925{
926 struct pci_dev *pdev;
927 int rc;
928
929 pdev = pm8001_ha->pdev;
930
931#ifdef PM8001_USE_MSIX
932 if (pdev->msix_cap && pci_msi_enabled())
933 return pm8001_setup_msix(pm8001_ha);
934 else {
935 PM8001_INIT_DBG(pm8001_ha,
936 pm8001_printk("MSIX not supported!!!\n"));
937 goto intx;
938 }
939#endif
940
941intx:
942 /* initialize the INT-X interrupt */
943 pm8001_ha->irq_vector[0].irq_id = 0;
944 pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
945 rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
946 DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
947 return rc;
948}
949
950/**
951 * pm8001_pci_probe - probe supported device
952 * @pdev: pci device which kernel has been prepared for.
953 * @ent: pci device id
954 *
955 * This function is the main initialization function: it is invoked when a new
956 * PCI device is probed; all structure and hardware initialization is done
957 * here, and the interrupt is registered as well.
958 */
959static int pm8001_pci_probe(struct pci_dev *pdev,
960 const struct pci_device_id *ent)
961{
962 unsigned int rc;
963 u32 pci_reg;
964 u8 i = 0;
965 struct pm8001_hba_info *pm8001_ha;
966 struct Scsi_Host *shost = NULL;
967 const struct pm8001_chip_info *chip;
968
969 dev_printk(KERN_INFO, &pdev->dev,
970 "pm80xx: driver version %s\n", DRV_VERSION);
971 rc = pci_enable_device(pdev);
972 if (rc)
973 goto err_out_enable;
974 pci_set_master(pdev);
975 /*
976 * Enable pci slot busmaster by setting pci command register.
977 * This is required by FW for Cyclone card.
978 */
979
980 pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg);
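	/* 0x157 sets the I/O Space, Memory Space, Bus Master, MWI,
	 * Parity Error Response and SERR enable bits in PCI_COMMAND */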
981 pci_reg |= 0x157;
982 pci_write_config_dword(pdev, PCI_COMMAND, pci_reg);
983 rc = pci_request_regions(pdev, DRV_NAME);
984 if (rc)
985 goto err_out_disable;
986 rc = pci_go_44(pdev);
987 if (rc)
988 goto err_out_regions;
989
990 shost = scsi_host_alloc(&pm8001_sht, sizeof(void *));
991 if (!shost) {
992 rc = -ENOMEM;
993 goto err_out_regions;
994 }
995 chip = &pm8001_chips[ent->driver_data];
996 SHOST_TO_SAS_HA(shost) =
997 kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
998 if (!SHOST_TO_SAS_HA(shost)) {
999 rc = -ENOMEM;
1000 goto err_out_free_host;
1001 }
1002
1003 rc = pm8001_prep_sas_ha_init(shost, chip);
1004 if (rc) {
1005 rc = -ENOMEM;
1006 goto err_out_free;
1007 }
1008 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
1009 /* ent->driver_data is used to differentiate between the controllers */
1010 pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
1011 if (!pm8001_ha) {
1012 rc = -ENOMEM;
1013 goto err_out_free;
1014 }
1015 list_add_tail(&pm8001_ha->list, &hba_list);
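	/* Reset the chip to a known state before chip_init() below */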
1016 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
1017 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
1018 if (rc) {
1019 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
1020 "chip_init failed [ret: %d]\n", rc));
1021 goto err_out_ha_free;
1022 }
1023
1024 rc = scsi_add_host(shost, &pdev->dev);
1025 if (rc)
1026 goto err_out_ha_free;
1027 rc = pm8001_request_irq(pm8001_ha);
1028 if (rc) {
1029 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
1030 "pm8001_request_irq failed [ret: %d]\n", rc));
1031 goto err_out_shost;
1032 }
1033
1034 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
1035 if (pm8001_ha->chip_id != chip_8001) {
1036 for (i = 1; i < pm8001_ha->number_of_intr; i++)
1037 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
1038 /* setup thermal configuration. */
1039 pm80xx_set_thermal_config(pm8001_ha);
1040 }
1041
1042 pm8001_init_sas_add(pm8001_ha);
1043 /* phy setting support for motherboard controller */
1044 rc = pm8001_configure_phy_settings(pm8001_ha);
1045 if (rc)
1046 goto err_out_shost;
1047
1048 pm8001_post_sas_ha_init(shost, chip);
1049 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
1050 if (rc)
1051 goto err_out_shost;
1052 scsi_scan_host(pm8001_ha->shost);
1053 pm8001_ha->flags = PM8001F_RUN_TIME;
1054 return 0;
1055
1056err_out_shost:
1057 scsi_remove_host(pm8001_ha->shost);
1058err_out_ha_free:
1059 pm8001_free(pm8001_ha);
1060err_out_free:
1061 kfree(SHOST_TO_SAS_HA(shost));
1062err_out_free_host:
1063 scsi_host_put(shost);
1064err_out_regions:
1065 pci_release_regions(pdev);
1066err_out_disable:
1067 pci_disable_device(pdev);
1068err_out_enable:
1069 return rc;
1070}
1071
1072static void pm8001_pci_remove(struct pci_dev *pdev)
1073{
1074 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
1075 struct pm8001_hba_info *pm8001_ha;
1076 int i, j;
1077 pm8001_ha = sha->lldd_ha;
1078 sas_unregister_ha(sha);
1079 sas_remove_host(pm8001_ha->shost);
1080 list_del(&pm8001_ha->list);
1081 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
1082 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
1083
1084#ifdef PM8001_USE_MSIX
1085 for (i = 0; i < pm8001_ha->number_of_intr; i++)
1086 synchronize_irq(pci_irq_vector(pdev, i));
1087 for (i = 0; i < pm8001_ha->number_of_intr; i++)
1088 free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
1089 pci_free_irq_vectors(pdev);
1090#else
1091 free_irq(pm8001_ha->irq, sha);
1092#endif
1093#ifdef PM8001_USE_TASKLET
1094 /* For non-msix and msix interrupts */
1095 if ((!pdev->msix_cap || !pci_msi_enabled()) ||
1096 (pm8001_ha->chip_id == chip_8001))
1097 tasklet_kill(&pm8001_ha->tasklet[0]);
1098 else
1099 for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
1100 tasklet_kill(&pm8001_ha->tasklet[j]);
1101#endif
1102 scsi_host_put(pm8001_ha->shost);
1103 pm8001_free(pm8001_ha);
1104 kfree(sha->sas_phy);
1105 kfree(sha->sas_port);
1106 kfree(sha);
1107 pci_release_regions(pdev);
1108 pci_disable_device(pdev);
1109}
1110
1111/**
1112 * pm8001_pci_suspend - power management suspend main entry point
1113 * @pdev: PCI device struct
1114 * @state: PM state to change to (usually PCI_D3)
1115 *
1116 * Returns 0 on success, anything else on error.
1117 */
1118static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1119{
1120 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
1121 struct pm8001_hba_info *pm8001_ha;
1122 int i, j;
1123 u32 device_state;
1124 pm8001_ha = sha->lldd_ha;
1125 sas_suspend_ha(sha);
1126 flush_workqueue(pm8001_wq);
1127 scsi_block_requests(pm8001_ha->shost);
1128 if (!pdev->pm_cap) {
1129 dev_err(&pdev->dev, " PCI PM not supported\n");
1130 return -ENODEV;
1131 }
1132 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
1133 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
1134#ifdef PM8001_USE_MSIX
1135 for (i = 0; i < pm8001_ha->number_of_intr; i++)
1136 synchronize_irq(pci_irq_vector(pdev, i));
1137 for (i = 0; i < pm8001_ha->number_of_intr; i++)
1138 free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
1139 pci_free_irq_vectors(pdev);
1140#else
1141 free_irq(pm8001_ha->irq, sha);
1142#endif
1143#ifdef PM8001_USE_TASKLET
1144 /* For non-msix and msix interrupts */
1145 if ((!pdev->msix_cap || !pci_msi_enabled()) ||
1146 (pm8001_ha->chip_id == chip_8001))
1147 tasklet_kill(&pm8001_ha->tasklet[0]);
1148 else
1149 for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
1150 tasklet_kill(&pm8001_ha->tasklet[j]);
1151#endif
1152 device_state = pci_choose_state(pdev, state);
1153 pm8001_printk("pdev=0x%p, slot=%s, entering "
1154 "operating state [D%d]\n", pdev,
1155 pm8001_ha->name, device_state);
1156 pci_save_state(pdev);
1157 pci_disable_device(pdev);
1158 pci_set_power_state(pdev, device_state);
1159 return 0;
1160}
1161
1162/**
1163 * pm8001_pci_resume - power management resume main entry point
1164 * @pdev: PCI device struct
1165 *
1166 * Returns 0 on success, anything else on error.
1167 */
1168static int pm8001_pci_resume(struct pci_dev *pdev)
1169{
1170 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
1171 struct pm8001_hba_info *pm8001_ha;
1172 int rc;
1173 u8 i = 0, j;
1174 u32 device_state;
1175 DECLARE_COMPLETION_ONSTACK(completion);
1176 pm8001_ha = sha->lldd_ha;
1177 device_state = pdev->current_state;
1178
1179 pm8001_printk("pdev=0x%p, slot=%s, resuming from previous "
1180 "operating state [D%d]\n", pdev, pm8001_ha->name, device_state);
1181
1182 pci_set_power_state(pdev, PCI_D0);
1183 pci_enable_wake(pdev, PCI_D0, 0);
1184 pci_restore_state(pdev);
1185 rc = pci_enable_device(pdev);
1186 if (rc) {
1187 pm8001_printk("slot=%s Enable device failed during resume\n",
1188 pm8001_ha->name);
1189 goto err_out_enable;
1190 }
1191
1192 pci_set_master(pdev);
1193 rc = pci_go_44(pdev);
1194 if (rc)
1195 goto err_out_disable;
1196 sas_prep_resume_ha(sha);
1197 /* chip soft rst only for spc */
1198 if (pm8001_ha->chip_id == chip_8001) {
1199 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
1200 PM8001_INIT_DBG(pm8001_ha,
1201 pm8001_printk("chip soft reset successful\n"));
1202 }
1203 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
1204 if (rc)
1205 goto err_out_disable;
1206
1207 /* disable all the interrupt bits */
1208 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
1209
1210 rc = pm8001_request_irq(pm8001_ha);
1211 if (rc)
1212 goto err_out_disable;
1213#ifdef PM8001_USE_TASKLET
1214 /* Tasklet for non msi-x interrupt handler */
1215 if ((!pdev->msix_cap || !pci_msi_enabled()) ||
1216 (pm8001_ha->chip_id == chip_8001))
1217 tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
1218 (unsigned long)&(pm8001_ha->irq_vector[0]));
1219 else
1220 for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
1221 tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
1222 (unsigned long)&(pm8001_ha->irq_vector[j]));
1223#endif
1224 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
1225 if (pm8001_ha->chip_id != chip_8001) {
1226 for (i = 1; i < pm8001_ha->number_of_intr; i++)
1227 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
1228 }
1229
1230 /* Chip documentation for the 8070 and 8072 SPCv */
1231 /* states that a 500ms minimum delay is required */
1232 /* before issuing commands. Otherwise, the firmware */
1233 /* will enter an unrecoverable state. */
1234
1235 if (pm8001_ha->chip_id == chip_8070 ||
1236 pm8001_ha->chip_id == chip_8072) {
1237 mdelay(500);
1238 }
1239
1240 /* Spin up the PHYs */
1241
1242 pm8001_ha->flags = PM8001F_RUN_TIME;
1243 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
1244 pm8001_ha->phy[i].enable_completion = &completion;
1245 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
1246 wait_for_completion(&completion);
1247 }
1248 sas_resume_ha(sha);
1249 return 0;
1250
1251err_out_disable:
1252 scsi_remove_host(pm8001_ha->shost);
1253 pci_disable_device(pdev);
1254err_out_enable:
1255 return rc;
1256}
1257
1258/* update of pci device, vendor id and driver data with
1259 * unique value for each of the controller
1260 */
1261static struct pci_device_id pm8001_pci_table[] = {
1262 { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
1263 { PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 },
1264 { PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 },
1265 { PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
1266 /* Support for SPC/SPCv/SPCve controllers */
1267 { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
1268 { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
1269 { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
1270 { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
1271 { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
1272 { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
1273 { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
1274 { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
1275 { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
1276 { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 },
1277 { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 },
1278 { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 },
1279 { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 },
1280 { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 },
1281 { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 },
1282 { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
1283 PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
1284 { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
1285 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
1286 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1287 PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
1288 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1289 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
1290 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1291 PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
1292 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1293 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
1294 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1295 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
1296 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1297 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
1298 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1299 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
1300 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1301 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
1302 { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
1303 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 },
1304 { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
1305 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 },
1306 { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
1307 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 },
1308 { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
1309 PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 },
1310 { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
1311 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 },
1312 { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
1313 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 },
1314 { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
1315 PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 },
1316 { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
1317 PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 },
1318 { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
1319 PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 },
1320 { PCI_VENDOR_ID_ATTO, 0x8070,
1321 PCI_VENDOR_ID_ATTO, 0x0070, 0, 0, chip_8070 },
1322 { PCI_VENDOR_ID_ATTO, 0x8070,
1323 PCI_VENDOR_ID_ATTO, 0x0071, 0, 0, chip_8070 },
1324 { PCI_VENDOR_ID_ATTO, 0x8072,
1325 PCI_VENDOR_ID_ATTO, 0x0072, 0, 0, chip_8072 },
1326 { PCI_VENDOR_ID_ATTO, 0x8072,
1327 PCI_VENDOR_ID_ATTO, 0x0073, 0, 0, chip_8072 },
1328 { PCI_VENDOR_ID_ATTO, 0x8070,
1329 PCI_VENDOR_ID_ATTO, 0x0080, 0, 0, chip_8070 },
1330 { PCI_VENDOR_ID_ATTO, 0x8072,
1331 PCI_VENDOR_ID_ATTO, 0x0081, 0, 0, chip_8072 },
1332 { PCI_VENDOR_ID_ATTO, 0x8072,
1333 PCI_VENDOR_ID_ATTO, 0x0082, 0, 0, chip_8072 },
1334 {} /* terminate list */
1335};
1336
1337static struct pci_driver pm8001_pci_driver = {
1338 .name = DRV_NAME,
1339 .id_table = pm8001_pci_table,
1340 .probe = pm8001_pci_probe,
1341 .remove = pm8001_pci_remove,
1342 .suspend = pm8001_pci_suspend,
1343 .resume = pm8001_pci_resume,
1344};
1345
1346/**
1347 * pm8001_init - initialize scsi transport template
1348 */
1349static int __init pm8001_init(void)
1350{
1351 int rc = -ENOMEM;
1352
1353 pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
1354 if (!pm8001_wq)
1355 goto err;
1356
1357 pm8001_id = 0;
1358 pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
1359 if (!pm8001_stt)
1360 goto err_wq;
1361 rc = pci_register_driver(&pm8001_pci_driver);
1362 if (rc)
1363 goto err_tp;
1364 return 0;
1365
1366err_tp:
1367 sas_release_transport(pm8001_stt);
1368err_wq:
1369 destroy_workqueue(pm8001_wq);
1370err:
1371 return rc;
1372}
1373
1374static void __exit pm8001_exit(void)
1375{
1376 pci_unregister_driver(&pm8001_pci_driver);
1377 sas_release_transport(pm8001_stt);
1378 destroy_workqueue(pm8001_wq);
1379}
1380
1381module_init(pm8001_init);
1382module_exit(pm8001_exit);
1383
1384MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
1385MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
1386MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
1387MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>");
1388MODULE_DESCRIPTION(
1389 "PMC-Sierra PM8001/8006/8081/8088/8089/8074/8076/8077/8070/8072 "
1390 "SAS/SATA controller driver");
1391MODULE_VERSION(DRV_VERSION);
1392MODULE_LICENSE("GPL");
1393MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
1394