// SPDX-License-Identifier: GPL-2.0
/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>
#include <asm/msidef.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

#define PCI_REG_VMCAP		0x40
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define PCI_REG_VMCONFIG	0x44
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define PCI_REG_VMLOCK		0x70
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

#define MB2_SHADOW_OFFSET	0x2000
#define MB2_SHADOW_SIZE		16

enum vmd_features {
	/*
	 * Device may contain registers which hint at the physical location of
	 * the membars, in order to allow proper address translation during
	 * resource assignment to enable guest virtualization.
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW	= (1 << 0),

	/*
	 * Device may provide root port configuration information which limits
	 * bus numbering
	 */
	VMD_FEAT_HAS_BUS_RESTRICTIONS	= (1 << 1),
};

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @irq:	back pointer to parent.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of child IRQs the VMD vector demuxes to.
 * @srcu:	SRCU struct for local synchronization.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	char __iomem		*cfgbar;

	int			msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;
	u8			busn_start;

	struct dma_map_ops	dma_ops;
	struct dma_domain	dma_domain;
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

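/*
 * The position of a vmd_irq_list within vmd->irqs identifies the VMD MSI-X
 * vector it represents; the same index is used as the Destination ID when
 * composing MSI messages for child devices.
 */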
static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO |
			  MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
	msg->data = 0;
}

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

/*
 * XXX: Stubbed until we develop an acceptable way to avoid creating conflicts
 * with other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	int i, best = 1;
	unsigned long flags;

	if (vmd->msix_count == 1)
		return &vmd->irqs[0];

	/*
	 * White list for fast-interrupt handlers. All others will share the
	 * "slow" interrupt vector.
	 */
	switch (msi_desc_to_pci_dev(desc)->class) {
	case PCI_CLASS_STORAGE_EXPRESS:
		break;
	default:
		return &vmd->irqs[0];
	}

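	/*
	 * Spread fast-path devices across the remaining vectors by picking
	 * the vmd_irq_list that currently has the fewest child IRQs.
	 */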
	raw_spin_lock_irqsave(&list_lock, flags);
	for (i = 1; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}

static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
	unsigned int index, vector;

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;
	index = index_from_irqs(vmd, vmdirq->irq);
	vector = pci_irq_vector(vmd->dev, index);

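	/*
	 * Back the child virq with the VMD MSI-X vector chosen above; the
	 * demux handler in vmd_irq() will dispatch to it via irq_list.
	 */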
	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

/*
 * VMD replaces the requester ID with its own. DMA mappings for devices in a
 * VMD domain need to be mapped for the VMD, not the device requiring
 * the mapping.
 */
static struct device *to_vmd_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	return &vmd->dev->dev;
}

static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		       gfp_t flag, unsigned long attrs)
{
	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t addr, unsigned long attrs)
{
	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t addr, size_t size,
		    unsigned long attrs)
{
	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
			      attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t addr, size_t size,
			   unsigned long attrs)
{
	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
				     attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
				  attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
			   enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, unsigned long attrs)
{
	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
				       size_t size, enum dma_data_direction dir)
{
	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return dma_supported(to_vmd_dev(dev), mask);
}

static u64 vmd_get_required_mask(struct device *dev)
{
	return dma_get_required_mask(to_vmd_dev(dev));
}

static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
	struct dma_domain *domain = &vmd->dma_domain;

	if (get_dma_ops(&vmd->dev->dev))
		del_dma_domain(domain);
}

#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
	do {					\
		if (source->fn)			\
			dest->fn = vmd_##fn;	\
	} while (0)

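/*
 * Build a dma_map_ops table for the VMD domain by wrapping whichever ops the
 * parent VMD device already has, so every mapping is redirected to the VMD
 * endpoint (the real DMA requester) via to_vmd_dev().
 */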
static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
	struct dma_map_ops *dest = &vmd->dma_ops;
	struct dma_domain *domain = &vmd->dma_domain;

	domain->domain_nr = vmd->sysdata.domain;
	domain->dma_ops = dest;

	if (!source)
		return;
	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
	ASSIGN_VMD_DMA_OPS(source, dest, free);
	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
	add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS

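/*
 * The CFGBAR is laid out like standard ECAM: 1MB of config space per bus
 * (bus << 20) and 4KB per device/function (devfn << 12), relative to the
 * first bus number in the VMD domain.
 */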
static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	char __iomem *addr = vmd->cfgbar +
			     ((bus->number - vmd->busn_start) << 20) +
			     (devfn << 12) + reg;

	if ((addr - vmd->cfgbar) + len >=
	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return addr;
}

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
 * 16 bits are the PCI Segment Group (domain) number. Other bits are
 * currently reserved.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}

static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct fwnode_handle *fn;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);
	resource_size_t offset[2] = {0};
	resource_size_t membar2_offset = 0x2000;
	struct pci_bus *child;

	/*
	 * Shadow registers may exist in certain VMD device ids which allow
	 * guests to correctly assign host physical addresses to the root ports
	 * and child devices. These registers will either return the host value
	 * or 0, depending on an enable bit in the VMD device.
	 */
	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
		u32 vmlock;
		int ret;

		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
		if (ret || vmlock == ~0)
			return -ENODEV;

		if (MB2_SHADOW_EN(vmlock)) {
			void __iomem *membar2;

			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
			if (!membar2)
				return -ENOMEM;
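			/*
			 * Each shadow register holds the host physical base
			 * of the corresponding membar; the difference from
			 * the locally visible BAR becomes the resource offset
			 * for the child bus.
			 */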
			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
					(readq(membar2 + MB2_SHADOW_OFFSET) &
					 PCI_BASE_ADDRESS_MEM_MASK);
			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
					(readq(membar2 + MB2_SHADOW_OFFSET + 8) &
					 PCI_BASE_ADDRESS_MEM_MASK);
			pci_iounmap(vmd->dev, membar2);
		}
	}

	/*
	 * Certain VMD devices may have a root port configuration option which
	 * limits the bus range to 0-127 or 128-255.
	 */
	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
		u32 vmcap, vmconfig;

		pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
		if (BUS_RESTRICT_CAP(vmcap) &&
		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
			vmd->busn_start = 128;
	}

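	/*
	 * The CFGBAR provides 1MB of config space per bus, so its size
	 * determines how many bus numbers this domain can expose starting at
	 * busn_start.
	 */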
	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = vmd->busn_start,
		.end   = vmd->busn_start + (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources. __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource. To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources. We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + membar2_offset,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_domain = true;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
						    x86_vector_domain);
	if (!vmd->irq_domain) {
		irq_domain_free_fwnode(fn);
		return -ENODEV;
	}

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);

	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
				       &vmd_ops, sd, &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		irq_domain_free_fwnode(fn);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	vmd_setup_dma_ops(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);

	pci_scan_child_bus(vmd->bus);
	pci_assign_unassigned_bus_resources(vmd->bus);

	/*
	 * VMD root buses are virtual and don't return true on pci_is_pcie()
	 * and will fail pcie_bus_configure_settings() early. It can instead be
	 * run on each of the real root ports.
	 */
	list_for_each_entry(child, &vmd->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

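/*
 * Demultiplex one VMD MSI-X vector: walk the child IRQ list under SRCU and
 * invoke each registered handler.
 */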
static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

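	/* The config BAR must cover at least one bus worth (1MB) of ECAM space. */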
	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
					PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);
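	/* Save the fwnode so it can be freed after the IRQ domain is removed. */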
	struct fwnode_handle *fn = vmd->irq_domain->fwnode;

	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_teardown_dma_ops(vmd);
	vmd_detach_resources(vmd);
	irq_domain_remove(vmd->irq_domain);
	irq_domain_free_fwnode(fn);
}

#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

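	/* Release the demux handlers across suspend; vmd_resume() re-requests them. */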
	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	pci_restore_state(pdev);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
		.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");