/* SPDX-License-Identifier: GPL-2.0 */
/*
 *	pci.h
 *
 *	PCI defines and function prototypes
 *	Copyright 1994, Drew Eckhardt
 *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
 *
 *	PCI Express ASPM defines and function prototypes
 *	Copyright (c) 2007 Intel Corp.
 *		Zhang Yanmin (yanmin.zhang@intel.com)
 *		Shaohua Li (shaohua.li@intel.com)
 *
 *	For more information, please consult the following manuals (look at
 *	http://www.pcisig.com/ for how to get them):
 *
 *	PCI BIOS Specification
 *	PCI Local Bus Specification
 *	PCI to PCI Bridge Specification
 *	PCI Express Specification
 *	PCI System Design Guide
 */
#ifndef LINUX_PCI_H
#define LINUX_PCI_H


#include <linux/mod_devicetable.h>

#include <linux/types.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
#include <uapi/linux/pci.h>

#include <linux/pci_ids.h>

/*
 * The PCI interface treats multi-function devices as independent
 * devices.  The slot/function address of each device is encoded
 * in a single byte as follows:
 *
 *	7:3 = slot
 *	2:0 = function
 *
 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
 * In the interest of not exposing interfaces to user-space unnecessarily,
 * the following kernel-only defines are being added here.
 */
#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
/* Return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x)		(((x) >> 8) & 0xff)
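/*
 * Illustrative sketch (not part of the interface above): composing and
 * decomposing a device ID with the uapi helpers plus the kernel-only
 * macros above.  The numbers are arbitrary examples.
 *
 *	u16 devid = PCI_DEVID(0x3a, PCI_DEVFN(0x1f, 2));
 *	u8 bus    = PCI_BUS_NUM(devid);		// 0x3a
 *	u8 slot   = PCI_SLOT(devid & 0xff);	// 0x1f
 *	u8 func   = PCI_FUNC(devid & 0xff);	// 2
 */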

/* pci_slot represents a physical slot */
struct pci_slot {
	struct pci_bus		*bus;		/* Bus this slot is on */
	struct list_head	list;		/* Node in list of slots */
	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
	struct kobject		kobj;
};

static inline const char *pci_slot_name(const struct pci_slot *slot)
{
	return kobject_name(&slot->kobj);
}

/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
	pci_mmap_io,
	pci_mmap_mem
};

/* For PCI devices, the region numbers are assigned this way: */
enum {
	/* #0-5: standard PCI resources */
	PCI_STD_RESOURCES,
	PCI_STD_RESOURCE_END = 5,

	/* #6: expansion ROM resource */
	PCI_ROM_RESOURCE,

	/* Device-specific resources */
#ifdef CONFIG_PCI_IOV
	PCI_IOV_RESOURCES,
	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif

	/* Resources assigned to buses behind the bridge */
#define PCI_BRIDGE_RESOURCE_NUM 4

	PCI_BRIDGE_RESOURCES,
	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
				  PCI_BRIDGE_RESOURCE_NUM - 1,

	/* Total resources associated with a PCI device */
	PCI_NUM_RESOURCES,

	/* Preserve this for compatibility */
	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};

/**
 * enum pci_interrupt_pin - PCI INTx interrupt values
 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
 * @PCI_INTERRUPT_INTA: PCI INTA pin
 * @PCI_INTERRUPT_INTB: PCI INTB pin
 * @PCI_INTERRUPT_INTC: PCI INTC pin
 * @PCI_INTERRUPT_INTD: PCI INTD pin
 *
 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
 * PCI_INTERRUPT_PIN register.
 */
enum pci_interrupt_pin {
	PCI_INTERRUPT_UNKNOWN,
	PCI_INTERRUPT_INTA,
	PCI_INTERRUPT_INTB,
	PCI_INTERRUPT_INTC,
	PCI_INTERRUPT_INTD,
};

/* The number of legacy PCI INTx interrupts */
#define PCI_NUM_INTX	4

/*
 * pci_power_t values must match the bits in the Capabilities PME_Support
 * and Control/Status PowerState fields in the Power Management capability.
 */
typedef int __bitwise pci_power_t;

#define PCI_D0		((pci_power_t __force) 0)
#define PCI_D1		((pci_power_t __force) 1)
#define PCI_D2		((pci_power_t __force) 2)
#define PCI_D3hot	((pci_power_t __force) 3)
#define PCI_D3cold	((pci_power_t __force) 4)
#define PCI_UNKNOWN	((pci_power_t __force) 5)
#define PCI_POWER_ERROR	((pci_power_t __force) -1)

/* Remember to update this when the list above changes! */
extern const char *pci_power_names[];

static inline const char *pci_power_name(pci_power_t state)
{
	return pci_power_names[1 + (__force int) state];
}
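/*
 * Usage sketch: a driver can log the human-readable name of its current
 * power state; "pdev" here is a hypothetical bound struct pci_dev.
 *
 *	dev_info(&pdev->dev, "power state: %s\n",
 *		 pci_power_name(pdev->current_state));
 */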

/**
 * typedef pci_channel_state_t
 *
 * The pci_channel state describes connectivity between the CPU and
 * the PCI device.  If some PCI bus between here and the PCI device
 * has crashed or locked up, this info is reflected here.
 */
typedef unsigned int __bitwise pci_channel_state_t;

enum pci_channel_state {
	/* I/O channel is in normal state */
	pci_channel_io_normal = (__force pci_channel_state_t) 1,

	/* I/O to channel is blocked */
	pci_channel_io_frozen = (__force pci_channel_state_t) 2,

	/* PCI card is dead */
	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
};

typedef unsigned int __bitwise pcie_reset_state_t;

enum pcie_reset_state {
	/* Reset is NOT asserted (Use to deassert reset) */
	pcie_deassert_reset = (__force pcie_reset_state_t) 1,

	/* Use #PERST to reset PCIe device */
	pcie_warm_reset = (__force pcie_reset_state_t) 2,

	/* Use PCIe Hot Reset to reset device */
	pcie_hot_reset = (__force pcie_reset_state_t) 3
};

typedef unsigned short __bitwise pci_dev_flags_t;
enum pci_dev_flags {
	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
	/* Device configuration is irrevocably lost if disabled into D3 */
	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
	/* Provide indication device is assigned by a Virtual Machine Manager */
	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
	/* Flag for quirk use to store if quirk-specific ACS is enabled */
	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
	/* Do not use bus resets for device */
	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
	/* Do not use PM reset even if device advertises NoSoftRst- */
	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
	/* Get VPD from function 0 VPD */
	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
	/* A non-root bridge where translation occurs, stop alias search here */
	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
	/* Do not use FLR even if device advertises PCI_AF_CAP */
	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
	/* Don't use Relaxed Ordering for TLPs directed at this device */
	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
};

enum pci_irq_reroute_variant {
	INTEL_IRQ_REROUTE_VARIANT = 1,
	MAX_IRQ_REROUTE_VARIANTS = 3
};

typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,
	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,
	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,
};

/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
enum pcie_link_width {
	PCIE_LNK_WIDTH_RESRV	= 0x00,
	PCIE_LNK_X1		= 0x01,
	PCIE_LNK_X2		= 0x02,
	PCIE_LNK_X4		= 0x04,
	PCIE_LNK_X8		= 0x08,
	PCIE_LNK_X12		= 0x0c,
	PCIE_LNK_X16		= 0x10,
	PCIE_LNK_X32		= 0x20,
	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
};

/* Based on the PCI Hotplug Spec, but some values are made up by us */
enum pci_bus_speed {
	PCI_SPEED_33MHz			= 0x00,
	PCI_SPEED_66MHz			= 0x01,
	PCI_SPEED_66MHz_PCIX		= 0x02,
	PCI_SPEED_100MHz_PCIX		= 0x03,
	PCI_SPEED_133MHz_PCIX		= 0x04,
	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
	PCI_SPEED_66MHz_PCIX_266	= 0x09,
	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
	AGP_UNKNOWN			= 0x0c,
	AGP_1X				= 0x0d,
	AGP_2X				= 0x0e,
	AGP_4X				= 0x0f,
	AGP_8X				= 0x10,
	PCI_SPEED_66MHz_PCIX_533	= 0x11,
	PCI_SPEED_100MHz_PCIX_533	= 0x12,
	PCI_SPEED_133MHz_PCIX_533	= 0x13,
	PCIE_SPEED_2_5GT		= 0x14,
	PCIE_SPEED_5_0GT		= 0x15,
	PCIE_SPEED_8_0GT		= 0x16,
	PCIE_SPEED_16_0GT		= 0x17,
	PCIE_SPEED_32_0GT		= 0x18,
	PCI_SPEED_UNKNOWN		= 0xff,
};

enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
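/*
 * Usage sketch (hypothetical "pdev"): querying the speed and width the
 * device is capable of, independent of what the link actually trained to.
 *
 *	enum pci_bus_speed speed = pcie_get_speed_cap(pdev);
 *	enum pcie_link_width width = pcie_get_width_cap(pdev);
 *
 *	if (speed >= PCIE_SPEED_8_0GT && width >= PCIE_LNK_X8)
 *		; // capable of at least 8 GT/s x8
 */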

struct pci_cap_saved_data {
	u16		cap_nr;
	bool		cap_extended;
	unsigned int	size;
	u32		data[0];
};

struct pci_cap_saved_state {
	struct hlist_node		next;
	struct pci_cap_saved_data	cap;
};

struct irq_affinity;
struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
struct pci_ats;
struct pci_p2pdma;

/* The pci_dev structure describes PCI devices */
struct pci_dev {
	struct list_head bus_list;	/* Node in per-bus list */
	struct pci_bus	*bus;		/* Bus this device is on */
	struct pci_bus	*subordinate;	/* Bus this device bridges to */

	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
	struct pci_slot	*slot;		/* Physical slot this device is in */

	unsigned int	devfn;		/* Encoded device & function index */
	unsigned short	vendor;
	unsigned short	device;
	unsigned short	subsystem_vendor;
	unsigned short	subsystem_device;
	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
	u8		revision;	/* PCI revision, low byte of class word */
	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
	u16		aer_cap;	/* AER capability offset */
	struct aer_stats *aer_stats;	/* AER stats for this device */
#endif
	u8		pcie_cap;	/* PCIe capability offset */
	u8		msi_cap;	/* MSI capability offset */
	u8		msix_cap;	/* MSI-X capability offset */
	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
	u8		rom_base_reg;	/* Config register controlling ROM */
	u8		pin;		/* Interrupt pin this device uses */
	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */

	struct pci_driver *driver;	/* Driver bound to this device */
	u64		dma_mask;	/* Mask of the bits of bus address this
					   device implements.  Normally this is
					   0xffffffff.  You only need to change
					   this if your device has broken DMA
					   or supports 64-bit transfers. */

	struct device_dma_parameters dma_parms;

	pci_power_t	current_state;	/* Current operating state. In ACPI,
					   this is D0-D3, D0 being fully
					   functional, and D3 being off. */
	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
	u8		pm_cap;		/* PM capability offset */
	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
					   can be generated */
	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
	unsigned int	d1_support:1;	/* Low power state D1 is supported */
	unsigned int	d2_support:1;	/* Low power state D2 is supported */
	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
						   decoding during BAR sizing */
	unsigned int	wakeup_prepared:1;
	unsigned int	runtime_d3cold:1;	/* Whether go through runtime
						   D3cold, not set for devices
						   powered on/off by the
						   corresponding bridge */
	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
						      controlled exclusively by
						      user sysfs */
	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
						   bit manually */
	unsigned int	d3_delay;	/* D3->D0 transition time in ms */
	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */

#ifdef CONFIG_PCIEASPM
	struct pcie_link_state	*link_state;	/* ASPM link state */
	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
					   supported from root to here */
#endif
	unsigned int	eetlp_prefix_path:1;	/* End-to-End TLP Prefix */

	pci_channel_state_t error_state;	/* Current connectivity state */
	struct device	dev;			/* Generic device interface */

	int		cfg_size;		/* Size of config space */

	/*
	 * Instead of touching interrupt line and base address registers
	 * directly, use the values stored here.  They might be different!
	 */
	unsigned int	irq;
	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */

	bool		match_driver;		/* Skip attaching driver */

	unsigned int	transparent:1;		/* Subtractive decode bridge */
	unsigned int	io_window:1;		/* Bridge has I/O window */
	unsigned int	pref_window:1;		/* Bridge has pref mem window */
	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
	unsigned int	multifunction:1;	/* Multi-function device */

	unsigned int	is_busmaster:1;		/* Is busmaster */
	unsigned int	no_msi:1;		/* May not use MSI */
	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */
	unsigned int	block_cfg_access:1;	/* Config space access blocked */
	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
	unsigned int	msi_enabled:1;
	unsigned int	msix_enabled:1;
	unsigned int	ari_enabled:1;		/* ARI forwarding */
	unsigned int	ats_enabled:1;		/* Address Translation Svc */
	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
	unsigned int	pri_enabled:1;		/* Page Request Interface */
	unsigned int	is_managed:1;
	unsigned int	needs_freset:1;		/* Requires fundamental reset */
	unsigned int	state_saved:1;
	unsigned int	is_physfn:1;
	unsigned int	is_virtfn:1;
	unsigned int	reset_fn:1;
	unsigned int	is_hotplug_bridge:1;
	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
	/*
	 * Devices marked as untrusted are the ones that can potentially
	 * execute DMA attacks and similar.  They are typically connected
	 * through external ports such as Thunderbolt but not limited to
	 * that.  When an IOMMU is enabled they should be getting full
	 * mappings to make sure they cannot access arbitrary memory.
	 */
	unsigned int	untrusted:1;
	unsigned int	__aer_firmware_first_valid:1;
	unsigned int	__aer_firmware_first:1;
	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
	unsigned int	irq_managed:1;
	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
	unsigned int	is_probed:1;		/* Device probing in progress */
	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
	pci_dev_flags_t dev_flags;
	atomic_t	enable_cnt;	/* pci_enable_device has been called */

	u32		saved_config_space[16]; /* Config space saved at suspend time */
	struct hlist_head saved_cap_space;
	struct bin_attribute *rom_attr;		/* Attribute descriptor for sysfs ROM entry */
	int		rom_attr_enabled;	/* Display of ROM attribute enabled? */
	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */

#ifdef CONFIG_HOTPLUG_PCI_PCIE
	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
	unsigned int	ptm_root:1;
	unsigned int	ptm_enabled:1;
	u8		ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
	const struct attribute_group **msi_irq_groups;
#endif
	struct pci_vpd *vpd;
#ifdef CONFIG_PCI_ATS
	union {
		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
		struct pci_dev		*physfn;	/* VF: related PF */
	};
	u16		ats_cap;	/* ATS Capability offset */
	u8		ats_stu;	/* ATS Smallest Translation Unit */
	atomic_t	ats_ref_cnt;	/* Number of VFs with ATS enabled */
#endif
#ifdef CONFIG_PCI_PRI
	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
#endif
#ifdef CONFIG_PCI_PASID
	u16		pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
	struct pci_p2pdma *p2pdma;
#endif
	phys_addr_t	rom;		/* Physical address if not from BAR */
	size_t		romlen;		/* Length if not from BAR */
	char		*driver_override; /* Driver name to force a match */

	unsigned long	priv_flags;	/* Private flags for the PCI driver */
};

static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		dev = dev->physfn;
#endif
	return dev;
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus);

#define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)

static inline int pci_channel_offline(struct pci_dev *pdev)
{
	return (pdev->error_state != pci_channel_io_normal);
}
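/*
 * Usage sketch: error-aware drivers check connectivity before trusting
 * MMIO reads (an all-ones read may just mean the link is gone).  "pdev"
 * is a hypothetical device pointer.
 *
 *	if (pci_channel_offline(pdev))
 *		return -ENODEV;
 */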

struct pci_host_bridge {
	struct device	dev;
	struct pci_bus	*bus;		/* Root bus */
	struct pci_ops	*ops;
	void		*sysdata;
	int		busnr;
	struct list_head windows;	/* resource_entry */
	struct list_head dma_ranges;	/* dma ranges resource list */
	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
	int (*map_irq)(const struct pci_dev *, u8, u8);
	void (*release_fn)(struct pci_host_bridge *);
	void		*release_data;
	struct msi_controller *msi;
	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
	unsigned int	no_ext_tags:1;		/* No Extended Tags */
	unsigned int	native_aer:1;		/* OS may use PCIe AER */
	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
	unsigned int	native_pme:1;		/* OS may use PCIe PME */
	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
	unsigned int	preserve_config:1;	/* Preserve FW resource setup */

	/* Resource alignment requirements */
	resource_size_t (*align_resource)(struct pci_dev *dev,
			const struct resource *res,
			resource_size_t start,
			resource_size_t size,
			resource_size_t align);
	unsigned long	private[0] ____cacheline_aligned;
};

#define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)

static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
{
	return (void *)bridge->private;
}

static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
{
	return container_of(priv, struct pci_host_bridge, private);
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv);
void pci_free_host_bridge(struct pci_host_bridge *bridge);
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
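/*
 * Usage sketch for the private-data helpers above, assuming a hypothetical
 * host controller driver with its own state structure:
 *
 *	struct my_host { void __iomem *regs; };	// hypothetical driver state
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct my_host));
 *	if (!bridge)
 *		return -ENOMEM;
 *	host = pci_host_bridge_priv(bridge);	// points at the my_host area
 */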

void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
				 void (*release_fn)(struct pci_host_bridge *),
				 void *release_data);

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);

/*
 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
 * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
 * buses below host bridges or subtractive decode bridges) go in the list.
 * Use pci_bus_for_each_resource() to iterate through all the resources.
 */

/*
 * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
 * and there's no way to program the bridge with the details of the window.
 * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
 * decode bit set, because they are explicit and can be programmed with _SRS.
 */
#define PCI_SUBTRACTIVE_DECODE	0x1

struct pci_bus_resource {
	struct list_head	list;
	struct resource		*res;
	unsigned int		flags;
};

#define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */

struct pci_bus {
	struct list_head node;		/* Node in list of buses */
	struct pci_bus	*parent;	/* Parent bus this bridge is on */
	struct list_head children;	/* List of child buses */
	struct list_head devices;	/* List of devices on this bus */
	struct pci_dev	*self;		/* Bridge device as seen by parent */
	struct list_head slots;		/* List of slots on this bus;
					   protected by pci_slot_mutex */
	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
	struct list_head resources;	/* Address space routed to this bus */
	struct resource busn_res;	/* Bus numbers routed to this bus */

	struct pci_ops	*ops;		/* Configuration access functions */
	struct msi_controller *msi;	/* MSI controller */
	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */

	unsigned char	number;		/* Bus number */
	unsigned char	primary;	/* Number of primary bridge */
	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	int		domain_nr;
#endif

	char		name[48];

	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
	struct device	*bridge;
	struct device	dev;
	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
	struct bin_attribute	*legacy_mem;	/* Legacy mem */
	unsigned int		is_added:1;
};

#define to_pci_bus(n)	container_of(n, struct pci_bus, dev)

static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}

/*
 * Returns true if the PCI bus is root (behind host-PCI bridge),
 * false otherwise
 *
 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
 * This is incorrect because "virtual" buses added for SR-IOV (via
 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
 */
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
	return !(pbus->parent);
}

/**
 * pci_is_bridge - check if the PCI device is a bridge
 * @dev: PCI device
 *
 * Return true if the PCI device is a bridge, whether or not it has a
 * subordinate bus.
 */
static inline bool pci_is_bridge(struct pci_dev *dev)
{
	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}

#define for_each_pci_bridge(dev, bus)				\
	list_for_each_entry(dev, &bus->devices, bus_list)	\
		if (!pci_is_bridge(dev)) {} else
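/*
 * Usage sketch: walking only the bridges on a bus, e.g. during resource
 * sizing.  "bus" is a hypothetical struct pci_bus pointer.
 *
 *	struct pci_dev *child;
 *
 *	for_each_pci_bridge(child, bus)
 *		dev_info(&child->dev, "found a bridge\n");
 */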

static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
{
	dev = pci_physfn(dev);
	if (pci_is_root_bus(dev->bus))
		return NULL;

	return dev->bus->self;
}

#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
	return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif

/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
#define PCIBIOS_BAD_VENDOR_ID		0x83
#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
#define PCIBIOS_SET_FAILED		0x88
#define PCIBIOS_BUFFER_TOO_SMALL	0x89

/* Translate above to generic errno for passing back through non-PCI code */
static inline int pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err; /* Assume already errno */

	switch (err) {
	case PCIBIOS_FUNC_NOT_SUPPORTED:
		return -ENOENT;
	case PCIBIOS_BAD_VENDOR_ID:
		return -ENOTTY;
	case PCIBIOS_DEVICE_NOT_FOUND:
		return -ENODEV;
	case PCIBIOS_BAD_REGISTER_NUMBER:
		return -EFAULT;
	case PCIBIOS_SET_FAILED:
		return -EIO;
	case PCIBIOS_BUFFER_TOO_SMALL:
		return -ENOSPC;
	}

	return -ERANGE;
}
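/*
 * Usage sketch: config accessors return PCIBIOS_* codes; convert them
 * before handing a failure back to generic code ("pdev" is hypothetical):
 *
 *	u16 vendor;
 *	int ret = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */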

/* Low-level architecture-dependent routines */

struct pci_ops {
	int (*add_bus)(struct pci_bus *bus);
	void (*remove_bus)(struct pci_bus *bus);
	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
};

/*
 * ACPI needs to be able to access PCI config space before we've done a
 * PCI bus scan and created pci_bus structures.
 */
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *val);
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 val);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
#endif

struct pci_bus_region {
	pci_bus_addr_t	start;
	pci_bus_addr_t	end;
};

struct pci_dynids {
	spinlock_t		lock;	/* Protects list, index */
	struct list_head	list;	/* For IDs added at runtime */
};


/*
 * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
 * a set of callbacks in struct pci_error_handlers, that device driver
 * will be notified of PCI bus errors, and will be driven to recovery
 * when an error occurs.
 */

typedef unsigned int __bitwise pci_ers_result_t;

enum pci_ers_result {
	/* No result/none/not supported in device driver */
	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,

	/* Device driver can recover without slot reset */
	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,

	/* Device driver wants slot to be reset */
	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,

	/* Device has completely failed, is unrecoverable */
	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,

	/* Device driver is fully recovered and operational */
	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,

	/* No AER capabilities registered for the driver */
	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
};

/* PCI bus error event callbacks */
struct pci_error_handlers {
	/* PCI bus error detected on this device */
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
					   enum pci_channel_state error);

	/* MMIO has been re-enabled, but not DMA */
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);

	/* PCI slot has been reset */
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);

	/* PCI function reset prepare or completed */
	void (*reset_prepare)(struct pci_dev *dev);
	void (*reset_done)(struct pci_dev *dev);

	/* Device driver may resume normal operations */
	void (*resume)(struct pci_dev *dev);
};
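/*
 * Illustrative sketch: a driver opts into error recovery by filling in
 * the callbacks it supports and pointing pci_driver::err_handler at the
 * table.  All mydrv_* names are hypothetical.
 *
 *	static const struct pci_error_handlers mydrv_err_handler = {
 *		.error_detected	= mydrv_error_detected,
 *		.slot_reset	= mydrv_slot_reset,
 *		.resume		= mydrv_resume,
 *	};
 */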


struct module;

/**
 * struct pci_driver - PCI driver structure
 * @node:	List of driver structures.
 * @name:	Driver name.
 * @id_table:	Pointer to table of device IDs the driver is
 *		interested in.  Most drivers should export this
 *		table using MODULE_DEVICE_TABLE(pci,...).
 * @probe:	This probing function gets called (during execution
 *		of pci_register_driver() for already existing
 *		devices or later if a new device gets inserted) for
 *		all PCI devices which match the ID table and are not
 *		"owned" by the other drivers yet.  This function gets
 *		passed a "struct pci_dev \*" for each device whose
 *		entry in the ID table matches the device.  The probe
 *		function returns zero when the driver chooses to
 *		take "ownership" of the device or an error code
 *		(negative number) otherwise.
 *		The probe function always gets called from process
 *		context, so it can sleep.
 * @remove:	The remove() function gets called whenever a device
 *		being handled by this driver is removed (either during
 *		deregistration of the driver or when it's manually
 *		pulled out of a hot-pluggable slot).
 *		The remove function always gets called from process
 *		context, so it can sleep.
 * @suspend:	Put device into low power state.
 * @suspend_late:	Put device into low power state.
 * @resume_early:	Wake device from low power state.
 * @resume:	Wake device from low power state.
 *		(Please see Documentation/power/pci.rst for descriptions
 *		of PCI Power Management and the related functions.)
 * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
 *		Intended to stop any idling DMA operations.
 *		Useful for enabling wake-on-lan (NIC) or changing
 *		the power state of a device before reboot.
 *		e.g. drivers/net/e100.c.
 * @sriov_configure: Optional driver callback to allow configuration of
 *		number of VFs to enable via sysfs "sriov_numvfs" file.
 * @err_handler: See Documentation/PCI/pci-error-recovery.rst
 * @groups:	Sysfs attribute groups.
 * @driver:	Driver model structure.
 * @dynids:	List of dynamically added device IDs.
 */
struct pci_driver {
	struct list_head	node;
	const char		*name;
	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
	int  (*suspend_late)(struct pci_dev *dev, pm_message_t state);
	int  (*resume_early)(struct pci_dev *dev);
	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
	void (*shutdown)(struct pci_dev *dev);
	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
	const struct pci_error_handlers *err_handler;
	const struct attribute_group **groups;
	struct device_driver	driver;
	struct pci_dynids	dynids;
};

#define	to_pci_driver(drv) container_of(drv, struct pci_driver, driver)

/**
 * PCI_DEVICE - macro used to describe a specific PCI device
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device.  The subvendor and subdevice fields will be set to
 * PCI_ANY_ID.
 */
#define PCI_DEVICE(vend,dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

/**
 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 * @subvend: the 16 bit PCI Subvendor ID
 * @subdev: the 16 bit PCI Subdevice ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device with subsystem information.
 */
#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = (subvend), .subdevice = (subdev)

/**
 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
 * @dev_class: the class, subclass, prog-if triple for this device
 * @dev_class_mask: the class mask for this device
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI class.  The vendor, device, subvendor, and subdevice
 * fields will be set to PCI_ANY_ID.
 */
#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
	.class = (dev_class), .class_mask = (dev_class_mask), \
	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

/**
 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
 * @vend: the vendor name
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device.  The subvendor, and subdevice fields will be set
 * to PCI_ANY_ID.  The macro allows the next field to follow as the device
 * private data.
 */
#define PCI_VDEVICE(vend, dev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0

/**
 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
 * @data: the driver data to be filled
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device.  The subvendor, and subdevice fields will be set
 * to PCI_ANY_ID.
 */
#define PCI_DEVICE_DATA(vend, dev, data) \
	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
	.driver_data = (kernel_ulong_t)(data)
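/*
 * Illustrative sketch: a match table built from the macros above.  The
 * device ID and the mydrv_* names are made up for the example.
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
 *		{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 *		{ }	// terminating entry
 *	};
 *	MODULE_DEVICE_TABLE(pci, mydrv_ids);
 */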

enum {
	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
};

#define PCI_IRQ_LEGACY		(1 << 0) /* Allow legacy interrupts */
#define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
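/*
 * Usage sketch: these flags feed pci_alloc_irq_vectors() (declared with
 * the MSI interfaces later in this header).  A driver that can work with
 * MSI-X, MSI or a legacy IRQ may ask for all of them ("pdev" hypothetical):
 *
 *	int nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *					 PCI_IRQ_MSIX | PCI_IRQ_MSI |
 *					 PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return nvec;
 */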

/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI

extern unsigned int pci_flags;

static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }

void pcie_bus_configure_settings(struct pci_bus *bus);

enum pcie_bus_config_types {
	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
	PCIE_BUS_SAFE,		/* Use largest MPS that boot-time devices support */
	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
};

extern enum pcie_bus_config_types pcie_bus_config;

extern struct bus_type pci_bus_type;

/*
 * Do NOT directly access these two variables, unless you are arch-specific
 * PCI code, or PCI core code.
 */
extern struct list_head pci_root_buses;	/* List of all known PCI buses */
/* Some device drivers need to know whether PCI is initialized */
int no_pci_devices(void);

void pcibios_resource_survey_bus(struct pci_bus *bus);
void pcibios_bus_add_device(struct pci_dev *pdev);
void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
/* Architecture-specific versions may override this (weak) */
char *pcibios_setup(char *str);

/* Used only when drivers/pci/setup.c is used */
resource_size_t pcibios_align_resource(void *, const struct resource *,
				       resource_size_t,
				       resource_size_t);

/* Weak but can be overridden by arch */
void pci_fixup_cardbus(struct pci_bus *);

/* Generic PCI functions used internally */

void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
			     struct resource *res);
void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
			     struct pci_bus_region *region);
void pcibios_scan_specific_bus(int busn);
struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
				    struct pci_ops *ops, void *sysdata,
				    struct list_head *resources);
int pci_host_probe(struct pci_host_bridge *bridge);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
				  struct pci_ops *ops, void *sysdata,
				  struct list_head *resources);
int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
				 const char *name,
				 struct hotplug_slot *hotplug);
void pci_destroy_slot(struct pci_slot *slot);
#ifdef CONFIG_SYSFS
void pci_dev_assign_slot(struct pci_dev *dev);
#else
static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
#endif
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
unsigned int pci_scan_child_bus(struct pci_bus *bus);
void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res);
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
struct pci_dev *pci_dev_get(struct pci_dev *dev);
void pci_dev_put(struct pci_dev *dev);
void pci_remove_bus(struct pci_bus *b);
void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
void pci_stop_root_bus(struct pci_bus *bus);
void pci_remove_root_bus(struct pci_bus *bus);
void pci_setup_cardbus(struct pci_bus *bus);
void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))

/* Generic PCI functions exported to card drivers */

enum pci_lost_interrupt_reason {
	PCI_LOST_IRQ_NO_INFORMATION = 0,
	PCI_LOST_IRQ_DISABLE_MSI,
	PCI_LOST_IRQ_DISABLE_MSIX,
	PCI_LOST_IRQ_DISABLE_ACPI,
};
enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
int pci_find_capability(struct pci_dev *dev, int cap);
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
int pci_find_ext_capability(struct pci_dev *dev, int cap);
int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);

struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
			       struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
			       unsigned int ss_vendor, unsigned int ss_device,
			       struct pci_dev *from);
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
					    unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
int pci_dev_present(const struct pci_device_id *ids);

int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
			     int where, u8 *val);
int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
			     int where, u16 *val);
int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
			      int where, u32 *val);
int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
			      int where, u8 val);
int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
			      int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
			       int where, u32 val);

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val);
int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val);
int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val);
int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val);

struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);

int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);

int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set);
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set);

static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
					   u16 set)
{
	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
}

static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
					    u32 set)
{
	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
}

static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
					     u16 clear)
{
	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}

static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
					      u32 clear)
{
	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}
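/*
 * Usage sketch: the helpers above edit registers in the PCIe capability
 * by offset.  For example, a quirk-style snippet that clears Relaxed
 * Ordering enable in the Device Control register ("pdev" hypothetical):
 *
 *	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
 *				   PCI_EXP_DEVCTL_RELAX_EN);
 */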

/* User-space driven config access */
int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);

int __must_check pci_enable_device(struct pci_dev *dev);
int __must_check pci_enable_device_io(struct pci_dev *dev);
int __must_check pci_enable_device_mem(struct pci_dev *dev);
int __must_check pci_reenable_device(struct pci_dev *);
int __must_check pcim_enable_device(struct pci_dev *pdev);
void pcim_pin_device(struct pci_dev *pdev);

static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
{
	/*
	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
	 * writable and no quirk has marked the feature broken.
	 */
	return !pdev->broken_intx_masking;
}

static inline int pci_is_enabled(struct pci_dev *pdev)
{
	return (atomic_read(&pdev->enable_cnt) > 0);
}

static inline int pci_is_managed(struct pci_dev *pdev)
{
	return pdev->is_managed;
}

void pci_disable_device(struct pci_dev *dev);

extern unsigned int pcibios_max_latency;
void pci_set_master(struct pci_dev *dev);
void pci_clear_master(struct pci_dev *dev);

int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
int pci_set_cacheline_size(struct pci_dev *dev);
#define HAVE_PCI_SET_MWI
int __must_check pci_set_mwi(struct pci_dev *dev);
int __must_check pcim_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
int pci_wait_for_pending_transaction(struct pci_dev *dev);
int pcix_get_max_mmrbc(struct pci_dev *dev);
int pcix_get_mmrbc(struct pci_dev *dev);
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width);
void pcie_print_link_status(struct pci_dev *dev);
bool pcie_has_flr(struct pci_dev *dev);
int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
int pci_probe_reset_bus(struct pci_bus *bus);
int pci_reset_bus(struct pci_dev *dev);
void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
void pci_release_resource(struct pci_dev *dev, int resno);
int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);

int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
		const char *fmt, ...);
void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);

/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);

/* Power management related routines */
int pci_save_state(struct pci_dev *dev);
void pci_restore_state(struct pci_dev *dev);
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state);
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state);
struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
						   u16 cap);
int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
				u16 cap, unsigned int size);
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
int pci_wake_from_d3(struct pci_dev *dev, bool enable);
int pci_prepare_to_sleep(struct pci_dev *dev);
int pci_back_from_sleep(struct pci_dev *dev);
bool pci_dev_run_wake(struct pci_dev *dev);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
void pci_wakeup_bus(struct pci_bus *bus);
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
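/*
 * Usage sketch: the classic legacy .suspend() sequence built from the
 * routines above ("pdev" and "state" are the callback arguments):
 *
 *	pci_save_state(pdev);
 *	pci_disable_device(pdev);
 *	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 */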

/* For use by arch with custom probe code */
void set_pcie_port_type(struct pci_dev *pdev);
void set_pcie_hotplug_bridge(struct pci_dev *pdev);

/* Functions for PCI Hotplug drivers to use */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
unsigned int pci_rescan_bus(struct pci_bus *bus);
void pci_lock_rescan_remove(void);
void pci_unlock_rescan_remove(void);

/* Vital Product Data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
int pci_set_vpd_size(struct pci_dev *dev, size_t len);

/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
void pci_bus_assign_resources(const struct pci_bus *bus);
void pci_bus_claim_resources(struct pci_bus *bus);
void pci_bus_size_bridges(struct pci_bus *bus);
int pci_claim_resource(struct pci_dev *, int);
int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_assign_irq(struct pci_dev *dev);
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
#define HAVE_PCI_REQ_REGIONS	2
int __must_check pci_request_regions(struct pci_dev *, const char *);
int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
void pci_release_regions(struct pci_dev *);
int __must_check pci_request_region(struct pci_dev *, int, const char *);
void pci_release_region(struct pci_dev *, int);
int pci_request_selected_regions(struct pci_dev *, int, const char *);
int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
void pci_release_selected_regions(struct pci_dev *, int);

/* drivers/pci/bus.c */
void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset);
void pci_free_resource_list(struct list_head *resources);
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
			  unsigned int flags);
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
void pci_bus_remove_resources(struct pci_bus *bus);
int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources);

/* Temporary until new and working PCI SBR API in place */
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);

#define pci_bus_for_each_resource(bus, res, i)				\
	for (i = 0;							\
	    (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
	     i++)
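/*
 * Usage sketch: iterating every window routed to a bus, both the fixed
 * table and the additional list entries ("bus" is hypothetical):
 *
 *	struct resource *r;
 *	int i;
 *
 *	pci_bus_for_each_resource(bus, r, i) {
 *		if (r && (r->flags & IORESOURCE_MEM))
 *			; // a memory window routed to this bus
 *	}
 */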
1312
1313int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1314 struct resource *res, resource_size_t size,
1315 resource_size_t align, resource_size_t min,
1316 unsigned long type_mask,
1317 resource_size_t (*alignf)(void *,
1318 const struct resource *,
1319 resource_size_t,
1320 resource_size_t),
1321 void *alignf_data);
1322
1323
1324int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
1325 resource_size_t size);
1326unsigned long pci_address_to_pio(phys_addr_t addr);
1327phys_addr_t pci_pio_to_address(unsigned long pio);
1328int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1329int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1330 phys_addr_t phys_addr);
1331void pci_unmap_iospace(struct resource *res);
1332void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1333 resource_size_t offset,
1334 resource_size_t size);
1335void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1336 struct resource *res);
1337
1338static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1339{
1340 struct pci_bus_region region;
1341
1342 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1343 return region.start;
1344}
1345
1346/* Proper probing supporting hot-pluggable devices */
1347int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1348 const char *mod_name);
1349
1350/* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1351#define pci_register_driver(driver) \
1352 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1353
1354void pci_unregister_driver(struct pci_driver *dev);
1355
1356/**
1357 * module_pci_driver() - Helper macro for registering a PCI driver
1358 * @__pci_driver: pci_driver struct
1359 *
1360 * Helper macro for PCI drivers which do not do anything special in module
1361 * init/exit. This eliminates a lot of boilerplate. Each module may only
1362 * use this macro once, and calling it replaces module_init() and module_exit().
1363 */
1364#define module_pci_driver(__pci_driver) \
1365 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
1366
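/*
 * Example (illustrative sketch, not part of this header): a minimal driver
 * skeleton built around module_pci_driver(). The "foo" names, the ID values
 * and the probe/remove bodies are hypothetical, and the usual module/driver
 * headers are assumed:
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, foo_ids);
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		return pci_enable_device(pdev);
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_disable_device(pdev);
 *	}
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_driver);
 */
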
1367/**
1368 * builtin_pci_driver() - Helper macro for registering a PCI driver
1369 * @__pci_driver: pci_driver struct
1370 *
1371 * Helper macro for PCI drivers which do not do anything special in their
1372 * init code. This eliminates a lot of boilerplate. Each driver may only
1373 * use this macro once, and calling it replaces device_initcall(...).
1374 */
1375#define builtin_pci_driver(__pci_driver) \
1376 builtin_driver(__pci_driver, pci_register_driver)
1377
1378struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1379int pci_add_dynid(struct pci_driver *drv,
1380 unsigned int vendor, unsigned int device,
1381 unsigned int subvendor, unsigned int subdevice,
1382 unsigned int class, unsigned int class_mask,
1383 unsigned long driver_data);
1384const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1385 struct pci_dev *dev);
1386int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1387 int pass);
1388
1389void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1390 void *userdata);
1391int pci_cfg_space_size(struct pci_dev *dev);
1392unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1393void pci_setup_bridge(struct pci_bus *bus);
1394resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1395 unsigned long type);
1396
1397#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1398#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1399
1400int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1401 unsigned int command_bits, u32 flags);
1402
1403/*
1404 * Virtual interrupts allow for more interrupts to be allocated
1405 * than the device has interrupts for. These are not programmed
1406 * into the device's MSI-X table and must be handled some other
1407 * way by the driver.
1408 */
1409#define PCI_IRQ_VIRTUAL (1 << 4)
1410
1411#define PCI_IRQ_ALL_TYPES \
1412 (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1413
1414/* kmem_cache style wrapper around pci_alloc_consistent() */
1415
1416#include <linux/dmapool.h>
1417
1418#define pci_pool dma_pool
1419#define pci_pool_create(name, pdev, size, align, allocation) \
1420 dma_pool_create(name, &pdev->dev, size, align, allocation)
1421#define pci_pool_destroy(pool) dma_pool_destroy(pool)
1422#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
1423#define pci_pool_zalloc(pool, flags, handle) \
1424 dma_pool_zalloc(pool, flags, handle)
1425#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
1426
1427struct msix_entry {
1428	u32	vector;	/* Kernel writes the allocated vector here */
1429	u16	entry;	/* Driver specifies which MSI-X table entry to use */
1430};
1431
1432#ifdef CONFIG_PCI_MSI
1433int pci_msi_vec_count(struct pci_dev *dev);
1434void pci_disable_msi(struct pci_dev *dev);
1435int pci_msix_vec_count(struct pci_dev *dev);
1436void pci_disable_msix(struct pci_dev *dev);
1437void pci_restore_msi_state(struct pci_dev *dev);
1438int pci_msi_enabled(void);
1439int pci_enable_msi(struct pci_dev *dev);
1440int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1441 int minvec, int maxvec);
1442static inline int pci_enable_msix_exact(struct pci_dev *dev,
1443 struct msix_entry *entries, int nvec)
1444{
1445 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1446 if (rc < 0)
1447 return rc;
1448 return 0;
1449}
1450int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1451 unsigned int max_vecs, unsigned int flags,
1452				   struct irq_affinity *affd);
1453
1454void pci_free_irq_vectors(struct pci_dev *dev);
1455int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1456const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1457int pci_irq_get_node(struct pci_dev *pdev, int vec);
1458
1459#else
1460static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1461static inline void pci_disable_msi(struct pci_dev *dev) { }
1462static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1463static inline void pci_disable_msix(struct pci_dev *dev) { }
1464static inline void pci_restore_msi_state(struct pci_dev *dev) { }
1465static inline int pci_msi_enabled(void) { return 0; }
1466static inline int pci_enable_msi(struct pci_dev *dev)
1467{ return -ENOSYS; }
1468static inline int pci_enable_msix_range(struct pci_dev *dev,
1469 struct msix_entry *entries, int minvec, int maxvec)
1470{ return -ENOSYS; }
1471static inline int pci_enable_msix_exact(struct pci_dev *dev,
1472 struct msix_entry *entries, int nvec)
1473{ return -ENOSYS; }
1474
1475static inline int
1476pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1477 unsigned int max_vecs, unsigned int flags,
1478			       struct irq_affinity *aff_desc)
1479{
1480 if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
1481 return 1;
1482 return -ENOSPC;
1483}
1484
1485static inline void pci_free_irq_vectors(struct pci_dev *dev)
1486{
1487}
1488
1489static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1490{
1491 if (WARN_ON_ONCE(nr > 0))
1492 return -EINVAL;
1493 return dev->irq;
1494}
1495static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1496 int vec)
1497{
1498 return cpu_possible_mask;
1499}
1500
1501static inline int pci_irq_get_node(struct pci_dev *pdev, int vec)
1502{
1503 return first_online_node;
1504}
1505#endif
1506
1507/**
1508 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1509 * @d: the INTx IRQ domain
1510 * @node: the DT node for the device whose interrupt we're translating
1511 * @intspec: the interrupt specifier data from the DT
1512 * @intsize: the number of entries in @intspec
1513 * @out_hwirq: pointer at which to write the hwirq number
1514 * @out_type: pointer at which to write the interrupt type
1515 *
1516 * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1517 * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1518 * 0-3 suitable for use in a 4-entry IRQ domain. That is, subtract one from the
1519 * INTx value to obtain the hwirq number.
1520 *
1521 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1522 */
1523static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1524 struct device_node *node,
1525 const u32 *intspec,
1526 unsigned int intsize,
1527 unsigned long *out_hwirq,
1528 unsigned int *out_type)
1529{
1530 const u32 intx = intspec[0];
1531
1532 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1533 return -EINVAL;
1534
1535 *out_hwirq = intx - PCI_INTERRUPT_INTA;
1536 return 0;
1537}
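
/*
 * Example (sketch): a host bridge driver typically plugs this helper into
 * its INTx IRQ domain ops as the .xlate callback; foo_intx_map is a
 * hypothetical .map callback:
 *
 *	static const struct irq_domain_ops foo_intx_domain_ops = {
 *		.map = foo_intx_map,
 *		.xlate = pci_irqd_intx_xlate,
 *	};
 */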
1538
1539#ifdef CONFIG_PCIEPORTBUS
1540extern bool pcie_ports_disabled;
1541extern bool pcie_ports_native;
1542#else
1543#define pcie_ports_disabled true
1544#define pcie_ports_native false
1545#endif
1546
1547#define PCIE_LINK_STATE_L0S 1
1548#define PCIE_LINK_STATE_L1 2
1549#define PCIE_LINK_STATE_CLKPM 4
1550
1551#ifdef CONFIG_PCIEASPM
1552int pci_disable_link_state(struct pci_dev *pdev, int state);
1553int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1554void pcie_no_aspm(void);
1555bool pcie_aspm_support_enabled(void);
1556bool pcie_aspm_enabled(struct pci_dev *pdev);
1557#else
1558static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1559{ return 0; }
1560static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1561{ return 0; }
1562static inline void pcie_no_aspm(void) { }
1563static inline bool pcie_aspm_support_enabled(void) { return false; }
1564static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1565#endif
1566
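/*
 * Example (sketch): a driver opting its device out of ASPM states that are
 * known to be problematic for that particular hardware:
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
 */
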
1567#ifdef CONFIG_PCIEAER
1568bool pci_aer_available(void);
1569#else
1570static inline bool pci_aer_available(void) { return false; }
1571#endif
1572
1573bool pci_ats_disabled(void);
1574
1575void pci_cfg_access_lock(struct pci_dev *dev);
1576bool pci_cfg_access_trylock(struct pci_dev *dev);
1577void pci_cfg_access_unlock(struct pci_dev *dev);
1578
1579/*
1580 * PCI domain support. Sometimes called PCI segment (e.g., by ACPI),
1581 * a PCI domain is defined to be a set of PCI buses which share
1582 * configuration space.
1583 */
1584#ifdef CONFIG_PCI_DOMAINS
1585extern int pci_domains_supported;
1586#else
1587enum { pci_domains_supported = 0 };
1588static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1589static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1590#endif /* CONFIG_PCI_DOMAINS */
1591
1592/*
1593 * Generic implementation for PCI domain support. If your
1594 * architecture does not need custom management of PCI
1595 * domains, then this implementation will be used.
1596 */
1597#ifdef CONFIG_PCI_DOMAINS_GENERIC
1598static inline int pci_domain_nr(struct pci_bus *bus)
1599{
1600 return bus->domain_nr;
1601}
1602#ifdef CONFIG_ACPI
1603int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1604#else
1605static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1606{ return 0; }
1607#endif
1608int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1609#endif
1610
1611/* Some architectures require additional setup to direct VGA traffic */
1612typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1613 unsigned int command_bits, u32 flags);
1614void pci_register_set_vga_state(arch_set_vga_state_t func);
1615
1616static inline int
1617pci_request_io_regions(struct pci_dev *pdev, const char *name)
1618{
1619 return pci_request_selected_regions(pdev,
1620 pci_select_bars(pdev, IORESOURCE_IO), name);
1621}
1622
1623static inline void
1624pci_release_io_regions(struct pci_dev *pdev)
1625{
1626 return pci_release_selected_regions(pdev,
1627 pci_select_bars(pdev, IORESOURCE_IO));
1628}
1629
1630static inline int
1631pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1632{
1633 return pci_request_selected_regions(pdev,
1634 pci_select_bars(pdev, IORESOURCE_MEM), name);
1635}
1636
1637static inline void
1638pci_release_mem_regions(struct pci_dev *pdev)
1639{
1640 return pci_release_selected_regions(pdev,
1641 pci_select_bars(pdev, IORESOURCE_MEM));
1642}
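
/*
 * Example (sketch): claiming and later releasing all memory BARs at once;
 * "foo" is an arbitrary owner name and error unwinding is abbreviated:
 *
 *	err = pci_request_mem_regions(pdev, "foo");
 *	if (err)
 *		return err;
 *
 *	pci_release_mem_regions(pdev);
 */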
1643
1644#else /* CONFIG_PCI is not enabled */
1645
1646static inline void pci_set_flags(int flags) { }
1647static inline void pci_add_flags(int flags) { }
1648static inline void pci_clear_flags(int flags) { }
1649static inline int pci_has_flag(int flag) { return 0; }
1650
1651/*
1652 * If the system does not have PCI, clearly these return errors. Define
1653 * these as simple inline functions to avoid hair in drivers.
1654 */
1655#define _PCI_NOP(o, s, t) \
1656 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1657 int where, t val) \
1658 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
1659
1660#define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
1661 _PCI_NOP(o, word, u16 x) \
1662 _PCI_NOP(o, dword, u32 x)
1663_PCI_NOP_ALL(read, *)
1664_PCI_NOP_ALL(write,)
1665
1666static inline struct pci_dev *pci_get_device(unsigned int vendor,
1667 unsigned int device,
1668 struct pci_dev *from)
1669{ return NULL; }
1670
1671static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1672 unsigned int device,
1673 unsigned int ss_vendor,
1674 unsigned int ss_device,
1675 struct pci_dev *from)
1676{ return NULL; }
1677
1678static inline struct pci_dev *pci_get_class(unsigned int class,
1679 struct pci_dev *from)
1680{ return NULL; }
1681
1682#define pci_dev_present(ids) (0)
1683#define no_pci_devices() (1)
1684#define pci_dev_put(dev) do { } while (0)
1685
1686static inline void pci_set_master(struct pci_dev *dev) { }
1687static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
1688static inline void pci_disable_device(struct pci_dev *dev) { }
1689static inline int pci_assign_resource(struct pci_dev *dev, int i)
1690{ return -EBUSY; }
1691static inline int __must_check __pci_register_driver(struct pci_driver *drv,
1692 struct module *owner,
1693 const char *mod_name)
1694{ return 0; }
1695static inline int pci_register_driver(struct pci_driver *drv)
1696{ return 0; }
1697static inline void pci_unregister_driver(struct pci_driver *drv) { }
1698static inline int pci_find_capability(struct pci_dev *dev, int cap)
1699{ return 0; }
1700static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
1701 int cap)
1702{ return 0; }
1703static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
1704{ return 0; }
1705
1706/* Power management related routines */
1707static inline int pci_save_state(struct pci_dev *dev) { return 0; }
1708static inline void pci_restore_state(struct pci_dev *dev) { }
1709static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1710{ return 0; }
1711static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1712{ return 0; }
1713static inline pci_power_t pci_choose_state(struct pci_dev *dev,
1714 pm_message_t state)
1715{ return PCI_D0; }
1716static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1717 int enable)
1718{ return 0; }
1719
1720static inline struct resource *pci_find_resource(struct pci_dev *dev,
1721 struct resource *res)
1722{ return NULL; }
1723static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
1724{ return -EIO; }
1725static inline void pci_release_regions(struct pci_dev *dev) { }
1726
1727static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
1728
1729static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
1730{ return NULL; }
1731static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
1732 unsigned int devfn)
1733{ return NULL; }
1734static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
1735 unsigned int bus, unsigned int devfn)
1736{ return NULL; }
1737
1738static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1739static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
1740
1741#define dev_is_pci(d) (false)
1742#define dev_is_pf(d) (false)
1743static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
1744{ return false; }
1745static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1746 struct device_node *node,
1747 const u32 *intspec,
1748 unsigned int intsize,
1749 unsigned long *out_hwirq,
1750 unsigned int *out_type)
1751{ return -EINVAL; }
1752
1753static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1754 struct pci_dev *dev)
1755{ return NULL; }
1756static inline bool pci_ats_disabled(void) { return true; }
1757
1758static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1759{
1760 return -EINVAL;
1761}
1762
1763static inline int
1764pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1765 unsigned int max_vecs, unsigned int flags,
1766 struct irq_affinity *aff_desc)
1767{
1768 return -ENOSPC;
1769}
1770#endif /* CONFIG_PCI */
1771
1772static inline int
1773pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1774 unsigned int max_vecs, unsigned int flags)
1775{
1776 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
1777 NULL);
1778}
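
/*
 * Example (sketch): typical vector setup in a probe path. Error unwinding
 * is abbreviated and foo_irq_handler is a hypothetical handler:
 *
 *	int nvec, i, err;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 *	if (nvec < 0)
 *		return nvec;
 *	for (i = 0; i < nvec; i++) {
 *		err = request_irq(pci_irq_vector(pdev, i), foo_irq_handler,
 *				  0, "foo", pdev);
 *		if (err) {
 *			pci_free_irq_vectors(pdev);
 *			return err;
 *		}
 *	}
 */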
1779
1780#ifdef CONFIG_PCI_ATS
1781/* Address Translation Service */
1782int pci_enable_ats(struct pci_dev *dev, int ps);
1783void pci_disable_ats(struct pci_dev *dev);
1784int pci_ats_queue_depth(struct pci_dev *dev);
1785int pci_ats_page_aligned(struct pci_dev *dev);
1786#else
1787static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
1788static inline void pci_disable_ats(struct pci_dev *d) { }
1789static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
1790static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
1791#endif
1792
1793/* Include architecture-dependent settings and functions */
1794
1795#include <asm/pci.h>
1796
1797/* These two functions provide almost identical functionality. Depending
1798 * on the architecture, one will be implemented as a wrapper around the
1799 * other (in drivers/pci/mmap.c).
1800 *
1801 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
1802 * is expected to be an offset within that region.
1803 *
1804 * pci_mmap_page_range() is the legacy architecture-specific interface,
1805 * which accepts a "user visible" resource address converted by
1806 * pci_resource_to_user(), as used in the legacy mmap() interface in
1807 * /proc/bus/pci/.
1808 */
1809int pci_mmap_resource_range(struct pci_dev *dev, int bar,
1810 struct vm_area_struct *vma,
1811 enum pci_mmap_state mmap_state, int write_combine);
1812int pci_mmap_page_range(struct pci_dev *pdev, int bar,
1813 struct vm_area_struct *vma,
1814 enum pci_mmap_state mmap_state, int write_combine);
1815
1816#ifndef arch_can_pci_mmap_wc
1817#define arch_can_pci_mmap_wc() 0
1818#endif
1819
1820#ifndef arch_can_pci_mmap_io
1821#define arch_can_pci_mmap_io() 0
1822#define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
1823#else
1824int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
1825#endif
1826
1827#ifndef pci_root_bus_fwnode
1828#define pci_root_bus_fwnode(bus) NULL
1829#endif
1830
1831/*
1832 * These helpers provide future and backwards compatibility
1833 * for accessing popular PCI BAR info.
1834 */
1835#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
1836#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
1837#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
1838#define pci_resource_len(dev,bar) \
1839 ((pci_resource_start((dev), (bar)) == 0 && \
1840 pci_resource_end((dev), (bar)) == \
1841 pci_resource_start((dev), (bar))) ? 0 : \
1842 \
1843 (pci_resource_end((dev), (bar)) - \
1844 pci_resource_start((dev), (bar)) + 1))
1845
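/*
 * Example (sketch): mapping BAR 2 with the helpers above (non-managed
 * variant); error handling is abbreviated:
 *
 *	void __iomem *base;
 *
 *	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM))
 *		return -ENODEV;
 *	base = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
 *	if (!base)
 *		return -ENOMEM;
 */
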
1846/*
1847 * Similar to the helpers above, these manipulate per-pci_dev
1848 * driver-specific data. They are really just thin wrappers around
1849 * the corresponding struct device functions.
1850 */
1851static inline void *pci_get_drvdata(struct pci_dev *pdev)
1852{
1853 return dev_get_drvdata(&pdev->dev);
1854}
1855
1856static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
1857{
1858 dev_set_drvdata(&pdev->dev, data);
1859}
1860
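/*
 * Example (sketch): stashing per-device state from probe(); "struct foo" is
 * hypothetical and the usual allocation headers are assumed:
 *
 *	struct foo *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *
 *	if (!priv)
 *		return -ENOMEM;
 *	pci_set_drvdata(pdev, priv);
 *
 * and retrieving it later, e.g. in remove():
 *
 *	struct foo *priv = pci_get_drvdata(pdev);
 */
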
1861static inline const char *pci_name(const struct pci_dev *pdev)
1862{
1863 return dev_name(&pdev->dev);
1864}
1865
1866void pci_resource_to_user(const struct pci_dev *dev, int bar,
1867 const struct resource *rsrc,
1868 resource_size_t *start, resource_size_t *end);
1869
1870/*
1871 * The world is not perfect and supplies us with broken PCI devices.
1872 * At least some of these bugs need a work-around, so both
1873 * generic (drivers/pci/quirks.c) and per-architecture code can define
1874 * fixup hooks to be called for particular buggy devices.
1875 */
1876
1877struct pci_fixup {
1878 u16 vendor; /* Or PCI_ANY_ID */
1879 u16 device; /* Or PCI_ANY_ID */
1880 u32 class; /* Or PCI_ANY_ID */
1881 unsigned int class_shift; /* should be 0, 8, 16 */
1882#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
1883 int hook_offset;
1884#else
1885 void (*hook)(struct pci_dev *dev);
1886#endif
1887};
1888
1889enum pci_fixup_pass {
1890 pci_fixup_early, /* Before probing BARs */
1891 pci_fixup_header, /* After reading configuration header */
1892 pci_fixup_final, /* Final phase of device fixups */
1893 pci_fixup_enable, /* pci_enable_device() time */
1894 pci_fixup_resume, /* pci_device_resume() */
1895 pci_fixup_suspend, /* pci_device_suspend() */
1896 pci_fixup_resume_early, /* pci_device_resume_early() */
1897 pci_fixup_suspend_late, /* pci_device_suspend_late() */
1898};
1899
1900#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
1901#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
1902 class_shift, hook) \
1903 __ADDRESSABLE(hook) \
1904 asm(".section " #sec ", \"a\" \n" \
1905 ".balign 16 \n" \
1906 ".short " #vendor ", " #device " \n" \
1907 ".long " #class ", " #class_shift " \n" \
1908 ".long " #hook " - . \n" \
1909 ".previous \n");
1910#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
1911 class_shift, hook) \
1912 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
1913 class_shift, hook)
1914#else
1915/* Anonymous variables would be nice... */
1916#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
1917 class_shift, hook) \
1918 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
1919 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
1920 = { vendor, device, class, class_shift, hook };
1921#endif
1922
1923#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
1924 class_shift, hook) \
1925 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1926 hook, vendor, device, class, class_shift, hook)
1927#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
1928 class_shift, hook) \
1929 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1930 hook, vendor, device, class, class_shift, hook)
1931#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
1932 class_shift, hook) \
1933 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1934 hook, vendor, device, class, class_shift, hook)
1935#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
1936 class_shift, hook) \
1937 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1938 hook, vendor, device, class, class_shift, hook)
1939#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1940 class_shift, hook) \
1941 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1942 resume##hook, vendor, device, class, class_shift, hook)
1943#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1944 class_shift, hook) \
1945 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1946 resume_early##hook, vendor, device, class, class_shift, hook)
1947#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1948 class_shift, hook) \
1949 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1950 suspend##hook, vendor, device, class, class_shift, hook)
1951#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
1952 class_shift, hook) \
1953 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1954 suspend_late##hook, vendor, device, class, class_shift, hook)
1955
1956#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1957 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1958 hook, vendor, device, PCI_ANY_ID, 0, hook)
1959#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
1960 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1961 hook, vendor, device, PCI_ANY_ID, 0, hook)
1962#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
1963 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1964 hook, vendor, device, PCI_ANY_ID, 0, hook)
1965#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
1966 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1967 hook, vendor, device, PCI_ANY_ID, 0, hook)
1968#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1969 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1970 resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
1971#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1972 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1973 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
1974#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1975 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1976 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
1977#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
1978 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1979 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
1980
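/*
 * Example (sketch): declaring a final-phase quirk; the vendor/device IDs
 * and the fixup body are hypothetical:
 *
 *	static void foo_no_msi_quirk(struct pci_dev *dev)
 *	{
 *		dev->no_msi = 1;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, foo_no_msi_quirk);
 */
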
1981#ifdef CONFIG_PCI_QUIRKS
1982void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
1983#else
1984static inline void pci_fixup_device(enum pci_fixup_pass pass,
1985 struct pci_dev *dev) { }
1986#endif
1987
1988void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
1989void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
1990void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
1991int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
1992int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
1993 const char *name);
1994void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
1995
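/*
 * Example (sketch): managed BAR mapping in probe(); BAR 0 and the "foo"
 * owner name are arbitrary:
 *
 *	void __iomem *regs;
 *	int err;
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pcim_iomap_regions(pdev, 1 << 0, "foo");
 *	if (err)
 *		return err;
 *	regs = pcim_iomap_table(pdev)[0];
 */
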
1996extern int pci_pci_problems;
1997#define PCIPCI_FAIL		1	/* No PCI-to-PCI DMA */
1998#define PCIPCI_TRITON 2
1999#define PCIPCI_NATOMA 4
2000#define PCIPCI_VIAETBF 8
2001#define PCIPCI_VSFX 16
2002#define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
2003#define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
2004
2005extern unsigned long pci_cardbus_io_size;
2006extern unsigned long pci_cardbus_mem_size;
2007extern u8 pci_dfl_cache_line_size;
2008extern u8 pci_cache_line_size;
2009
2010/* Architecture-specific versions may override these (weak) */
2011void pcibios_disable_device(struct pci_dev *dev);
2012void pcibios_set_master(struct pci_dev *dev);
2013int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2014 enum pcie_reset_state state);
2015int pcibios_add_device(struct pci_dev *dev);
2016void pcibios_release_device(struct pci_dev *dev);
2017#ifdef CONFIG_PCI
2018void pcibios_penalize_isa_irq(int irq, int active);
2019#else
2020static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2021#endif
2022int pcibios_alloc_irq(struct pci_dev *dev);
2023void pcibios_free_irq(struct pci_dev *dev);
2024resource_size_t pcibios_default_alignment(void);
2025
2026#ifdef CONFIG_HIBERNATE_CALLBACKS
2027extern struct dev_pm_ops pcibios_pm_ops;
2028#endif
2029
2030#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2031void __init pci_mmcfg_early_init(void);
2032void __init pci_mmcfg_late_init(void);
2033#else
2034static inline void pci_mmcfg_early_init(void) { }
2035static inline void pci_mmcfg_late_init(void) { }
2036#endif
2037
2038int pci_ext_cfg_avail(void);
2039
2040void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2041void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2042
2043#ifdef CONFIG_PCI_IOV
2044int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2045int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2046
2047int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2048void pci_disable_sriov(struct pci_dev *dev);
2049int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2050void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2051int pci_num_vf(struct pci_dev *dev);
2052int pci_vfs_assigned(struct pci_dev *dev);
2053int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2054int pci_sriov_get_totalvfs(struct pci_dev *dev);
2055int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2056resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2057void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2058
2059/* Arch may override these (weak) */
2060int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2061int pcibios_sriov_disable(struct pci_dev *pdev);
2062resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2063#else
2064static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2065{
2066 return -ENOSYS;
2067}
2068static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2069{
2070 return -ENOSYS;
2071}
2072static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2073{ return -ENODEV; }
2074static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2075{
2076 return -ENOSYS;
2077}
2078static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2079 int id) { }
2080static inline void pci_disable_sriov(struct pci_dev *dev) { }
2081static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
2082static inline int pci_vfs_assigned(struct pci_dev *dev)
2083{ return 0; }
2084static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2085{ return 0; }
2086static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2087{ return 0; }
2088#define pci_sriov_configure_simple NULL
2089static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2090{ return 0; }
2091static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2092#endif
2093
2094#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
2095void pci_hp_create_module_link(struct pci_slot *pci_slot);
2096void pci_hp_remove_module_link(struct pci_slot *pci_slot);
2097#endif
2098
2099/**
2100 * pci_pcie_cap - get the saved PCIe capability offset
2101 * @dev: PCI device
2102 *
2103 * PCIe capability offset is calculated at PCI device initialization
2104 * time and saved in the data structure. This function returns the saved
2105 * PCIe capability offset. Using it instead of pci_find_capability()
2106 * avoids an unnecessary search of the PCI configuration space. If you
2107 * need to recompute the PCIe capability offset from the raw device for
2108 * some reason, use pci_find_capability() instead.
2109 */
2110static inline int pci_pcie_cap(struct pci_dev *dev)
2111{
2112 return dev->pcie_cap;
2113}
2114
2115/**
2116 * pci_is_pcie - check if the PCI device is PCI Express capable
2117 * @dev: PCI device
2118 *
2119 * Returns: true if the PCI device is PCI Express capable, false otherwise.
2120 */
2121static inline bool pci_is_pcie(struct pci_dev *dev)
2122{
2123 return pci_pcie_cap(dev);
2124}
2125
2126/**
2127 * pcie_caps_reg - get the PCIe Capabilities Register
2128 * @dev: PCI device
2129 */
2130static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2131{
2132 return dev->pcie_flags_reg;
2133}
2134
2135/**
2136 * pci_pcie_type - get the PCIe device/port type
2137 * @dev: PCI device
2138 */
2139static inline int pci_pcie_type(const struct pci_dev *dev)
2140{
2141 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2142}
2143
2144static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2145{
2146 while (1) {
2147 if (!pci_is_pcie(dev))
2148 break;
2149 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2150 return dev;
2151 if (!dev->bus->self)
2152 break;
2153 dev = dev->bus->self;
2154 }
2155 return NULL;
2156}
2157
2158void pci_request_acs(void);
2159bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2160bool pci_acs_path_enabled(struct pci_dev *start,
2161 struct pci_dev *end, u16 acs_flags);
2162int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2163
2164#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
2165#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
2166
2167/* Large Resource Data Type Tag Item Names */
2168#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
2169#define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
2170#define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
2171
2172#define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
2173#define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
2174#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
2175
2176/* Small Resource Data Type Tag Item Names */
2177#define PCI_VPD_STIN_END 0x0f /* End */
2178
2179#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
2180
2181#define PCI_VPD_SRDT_TIN_MASK 0x78
2182#define PCI_VPD_SRDT_LEN_MASK 0x07
2183#define PCI_VPD_LRDT_TIN_MASK 0x7f
2184
2185#define PCI_VPD_LRDT_TAG_SIZE 3
2186#define PCI_VPD_SRDT_TAG_SIZE 1
2187
2188#define PCI_VPD_INFO_FLD_HDR_SIZE 3
2189
2190#define PCI_VPD_RO_KEYWORD_PARTNO "PN"
2191#define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
2192#define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
2193#define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
2194
2195/**
2196 * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
2197 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
2198 *
2199 * Returns the extracted Large Resource Data Type length.
2200 */
2201static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
2202{
2203 return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
2204}
2205
2206/**
2207 * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
2208 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
2209 *
2210 * Returns the extracted Large Resource Data Type Tag item.
2211 */
2212static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
2213{
2214 return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
2215}
2216
2217/**
2218 * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
2219 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
2220 *
2221 * Returns the extracted Small Resource Data Type length.
2222 */
2223static inline u8 pci_vpd_srdt_size(const u8 *srdt)
2224{
2225 return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
2226}
2227
2228/**
2229 * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
2230 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
2231 *
2232 * Returns the extracted Small Resource Data Type Tag Item.
2233 */
2234static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
2235{
2236 return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
2237}
2238
2239/**
2240 * pci_vpd_info_field_size - Extracts the information field length
2241 * @info_field: Pointer to the beginning of an information field header
2242 *
2243 * Returns the extracted information field length.
2244 */
2245static inline u8 pci_vpd_info_field_size(const u8 *info_field)
2246{
2247 return info_field[2];
2248}
2249
2250/**
2251 * pci_vpd_find_tag - Locates the Resource Data Type tag provided
2252 * @buf: Pointer to buffered vpd data
2253 * @off: The offset into the buffer at which to begin the search
2254 * @len: The length of the vpd buffer
2255 * @rdt: The Resource Data Type to search for
2256 *
2257 * Returns the index where the Resource Data Type was found or
2258 * -ENOENT otherwise.
2259 */
2260int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
2261
2262/**
2263 * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
2264 * @buf: Pointer to buffered vpd data
2265 * @off: The offset into the buffer at which to begin the search
2266 * @len: The length of the buffer area, relative to off, in which to search
2267 * @kw: The keyword to search for
2268 *
2269 * Returns the index where the information field keyword was found or
2270 * -ENOENT otherwise.
2271 */
2272int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
2273 unsigned int len, const char *kw);
2274
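/*
 * Example (sketch): locating the read-only Part Number keyword in a VPD
 * image previously read into "buf" (of length "len"), e.g. with
 * pci_read_vpd(); bounds checking is abbreviated:
 *
 *	int ro, pn;
 *
 *	ro = pci_vpd_find_tag(buf, 0, len, PCI_VPD_LRDT_RO_DATA);
 *	if (ro < 0)
 *		return ro;
 *	pn = pci_vpd_find_info_keyword(buf, ro + PCI_VPD_LRDT_TAG_SIZE,
 *				       pci_vpd_lrdt_size(&buf[ro]),
 *				       PCI_VPD_RO_KEYWORD_PARTNO);
 */
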
2275/* PCI <-> OF binding helpers */
2276#ifdef CONFIG_OF
2277struct device_node;
2278struct irq_domain;
2279struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2280int pci_parse_request_of_pci_ranges(struct device *dev,
2281 struct list_head *resources,
2282 struct resource **bus_range);
2283
2284/* Arch may override this (weak) */
2285struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2286
2287#else /* CONFIG_OF */
2288static inline struct irq_domain *
2289pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2290static inline int pci_parse_request_of_pci_ranges(struct device *dev,
2291 struct list_head *resources,
2292 struct resource **bus_range)
2293{
2294 return -EINVAL;
2295}
2296#endif /* CONFIG_OF */
2297
2298static inline struct device_node *
2299pci_device_to_OF_node(const struct pci_dev *pdev)
2300{
2301 return pdev ? pdev->dev.of_node : NULL;
2302}
2303
2304static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2305{
2306 return bus ? bus->dev.of_node : NULL;
2307}
2308
2309#ifdef CONFIG_ACPI
2310struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2311
2312void
2313pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2314bool pci_pr3_present(struct pci_dev *pdev);
2315#else
2316static inline struct irq_domain *
2317pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
2318static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
2319#endif
2320
2321#ifdef CONFIG_EEH
2322static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
2323{
2324 return pdev->dev.archdata.edev;
2325}
2326#endif
2327
2328void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
2329bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2330int pci_for_each_dma_alias(struct pci_dev *pdev,
2331 int (*fn)(struct pci_dev *pdev,
2332 u16 alias, void *data), void *data);
2333
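/*
 * Example (sketch): a header-phase quirk marking function 1's devfn as a
 * DMA alias of a (hypothetical) device whose DMA is tagged with the wrong
 * function number; the IDs are illustrative:
 *
 *	static void foo_dma_alias_quirk(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, foo_dma_alias_quirk);
 */
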
2334/* Helpers for manipulating the PCI_DEV_FLAGS_ASSIGNED device flag */
2335static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2336{
2337 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
2338}
2339static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
2340{
2341 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
2342}
2343static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
2344{
2345 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
2346}
2347
2348/**
2349 * pci_ari_enabled - query ARI forwarding status
2350 * @bus: the PCI bus
2351 *
2352 * Returns true if ARI forwarding is enabled.
2353 */
2354static inline bool pci_ari_enabled(struct pci_bus *bus)
2355{
2356 return bus->self && bus->self->ari_enabled;
2357}
2358
2359/**
2360 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2361 * @pdev: PCI device to check
2362 *
2363 * Walk upwards from @pdev and check each bridge encountered for being part
2364 * of a Thunderbolt controller. Reaching the host bridge means @pdev is not
2365 * Thunderbolt-attached (it is usually soldered to the mainboard instead).
2366 */
2367static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2368{
2369 struct pci_dev *parent = pdev;
2370
2371 if (pdev->is_thunderbolt)
2372 return true;
2373
2374 while ((parent = pci_upstream_bridge(parent)))
2375 if (parent->is_thunderbolt)
2376 return true;
2377
2378 return false;
2379}
2380
2381#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
2382void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
2383#endif
2384
2385/* Provide the legacy pci_dma_* API */
2386#include <linux/pci-dma-compat.h>
2387
2388#define pci_printk(level, pdev, fmt, arg...) \
2389 dev_printk(level, &(pdev)->dev, fmt, ##arg)
2390
2391#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
2392#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
2393#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
2394#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
2395#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
2396#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
2397#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
2398#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
2399
2400#define pci_notice_ratelimited(pdev, fmt, arg...) \
2401 dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
2402
2403#define pci_info_ratelimited(pdev, fmt, arg...) \
2404 dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
2405
2406#endif /* LINUX_PCI_H */