// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/misc/xillybus_pcie.c
 *
 * Copyright 2011 Xillybus Ltd, http://xillybus.com
 *
 * Driver for the Xillybus FPGA/host framework using PCI Express.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "xillybus.h"

MODULE_DESCRIPTION("Xillybus driver for PCIe");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_VERSION("1.06");
MODULE_ALIAS("xillybus_pcie");
MODULE_LICENSE("GPL v2");

#define PCI_DEVICE_ID_XILLYBUS 0xebeb

#define PCI_VENDOR_ID_ACTEL 0x11aa
#define PCI_VENDOR_ID_LATTICE 0x1204

static const char xillyname[] = "xillybus_pcie";

static const struct pci_device_id xillyids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)},
	{PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)},
	{PCI_DEVICE(PCI_VENDOR_ID_LATTICE, PCI_DEVICE_ID_XILLYBUS)},
	{ /* End: all zeroes */ }
};

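/*
 * Translate a generic dma_data_direction value into the legacy PCI_DMA_*
 * code expected by the pci_dma_* helpers below. Anything other than
 * DMA_TO_DEVICE and DMA_FROM_DEVICE falls back to the safe
 * PCI_DMA_BIDIRECTIONAL.
 */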
static int xilly_pci_direction(int direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		return PCI_DMA_TODEVICE;
	case DMA_FROM_DEVICE:
		return PCI_DMA_FROMDEVICE;
	default:
		return PCI_DMA_BIDIRECTIONAL;
	}
}

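/*
 * The two sync wrappers below adapt the PCI-flavored DMA sync calls to the
 * transport-agnostic prototypes the xillybus core expects in
 * struct xilly_endpoint_hardware.
 */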
static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep,
					      dma_addr_t dma_handle,
					      size_t size,
					      int direction)
{
	pci_dma_sync_single_for_cpu(ep->pdev,
				    dma_handle,
				    size,
				    xilly_pci_direction(direction));
}

static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep,
						 dma_addr_t dma_handle,
						 size_t size,
						 int direction)
{
	pci_dma_sync_single_for_device(ep->pdev,
				       dma_handle,
				       size,
				       xilly_pci_direction(direction));
}

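/*
 * Cleanup action registered with devm_add_action_or_reset() in
 * xilly_map_single_pci(): runs automatically when the device goes away,
 * undoing the streaming DMA mapping and freeing the bookkeeping struct.
 */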
static void xilly_pci_unmap(void *ptr)
{
	struct xilly_mapping *data = ptr;

	pci_unmap_single(data->device, data->dma_addr,
			 data->size, data->direction);

	kfree(ptr);
}

/*
 * Map either through the PCI DMA mapper or the non-PCI one. Behind the
 * scenes, exactly the same functions are called with the same parameters,
 * but that can change.
 */

static int xilly_map_single_pci(struct xilly_endpoint *ep,
				void *ptr,
				size_t size,
				int direction,
				dma_addr_t *ret_dma_handle)
{
	int pci_direction;
	dma_addr_t addr;
	struct xilly_mapping *this;

	this = kzalloc(sizeof(*this), GFP_KERNEL);
	if (!this)
		return -ENOMEM;

	pci_direction = xilly_pci_direction(direction);

	addr = pci_map_single(ep->pdev, ptr, size, pci_direction);

	if (pci_dma_mapping_error(ep->pdev, addr)) {
		kfree(this);
		return -ENODEV;
	}

	this->device = ep->pdev;
	this->dma_addr = addr;
	this->size = size;
	this->direction = pci_direction;

	*ret_dma_handle = addr;

	return devm_add_action_or_reset(ep->dev, xilly_pci_unmap, this);
}

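/*
 * The hardware-dependent operations handed to the xillybus core, which
 * calls them without caring whether the transport is PCIe or the non-PCI
 * alternative.
 */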
static struct xilly_endpoint_hardware pci_hw = {
	.owner = THIS_MODULE,
	.hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci,
	.hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci,
	.map_single = xilly_map_single_pci,
};

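/*
 * A rough sketch, not code from the xillybus core: the core is expected to
 * reach these operations through the endpoint, along the lines of
 *
 *	dma_addr_t handle;
 *	int rc = endpoint->ephw->map_single(endpoint, buf, len,
 *					    DMA_TO_DEVICE, &handle);
 *
 * where the ephw field name is an assumption about xillybus.h. The point
 * is that the PCIe transport stays interchangeable with the non-PCI one.
 */

/*
 * Probe: enable the device, map BAR 0, set up a single MSI interrupt and
 * the DMA mask, then hand over to the transport-independent discovery code
 * in the xillybus core.
 */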
static int xilly_probe(struct pci_dev *pdev,
		       const struct pci_device_id *ent)
{
	struct xilly_endpoint *endpoint;
	int rc;

	endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw);

	if (!endpoint)
		return -ENOMEM;

	pci_set_drvdata(pdev, endpoint);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(endpoint->dev,
			"pcim_enable_device() failed. Aborting.\n");
		return rc;
	}

	/* L0s has caused packet drops. No power saving, thank you. */

	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(endpoint->dev,
			"Incorrect BAR configuration. Aborting.\n");
		return -ENODEV;
	}

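	/* Map BAR 0 only (bit 0 of the region mask); it holds the registers */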
	rc = pcim_iomap_regions(pdev, 0x01, xillyname);
	if (rc) {
		dev_err(endpoint->dev,
			"pcim_iomap_regions() failed. Aborting.\n");
		return rc;
	}

	endpoint->registers = pcim_iomap_table(pdev)[0];

	pci_set_master(pdev);

	/* Set up a single MSI interrupt */
	if (pci_enable_msi(pdev)) {
		dev_err(endpoint->dev,
			"Failed to enable MSI interrupts. Aborting.\n");
		return -ENODEV;
	}
	rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0,
			      xillyname, endpoint);
	if (rc) {
		dev_err(endpoint->dev,
			"Failed to register MSI handler. Aborting.\n");
		return -ENODEV;
	}

	/*
	 * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets,
	 * even when the PCIe driver claims that a 64-bit mask is OK. On the
	 * other hand, on some architectures, 64-bit addressing is mandatory.
	 * So go for the 64-bit mask only when failing is the other option.
	 */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		endpoint->dma_using_dac = 0;
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		endpoint->dma_using_dac = 1;
	} else {
		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
		return -ENODEV;
	}
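
	/*
	 * Either way, dma_using_dac now records whether dual address cycle
	 * (64-bit) addressing is in effect.
	 */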

	return xillybus_endpoint_discovery(endpoint);
}

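/*
 * Everything set up in xilly_probe() is device-managed (pcim_*, devm_*),
 * so the remove callback only needs to tear down the xillybus endpoint
 * itself.
 */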
static void xilly_remove(struct pci_dev *pdev)
{
	struct xilly_endpoint *endpoint = pci_get_drvdata(pdev);

	xillybus_endpoint_remove(endpoint);
}

MODULE_DEVICE_TABLE(pci, xillyids);

static struct pci_driver xillybus_driver = {
	.name = xillyname,
	.id_table = xillyids,
	.probe = xilly_probe,
	.remove = xilly_remove,
};

module_pci_driver(xillybus_driver);