// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3hot_delay;

	if (delay < pci_pm_d3hot_delay)
		delay = pci_pm_d3hot_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);
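
/*
 * Example (illustrative): booting with "pcie_port_pm=off" on the kernel
 * command line keeps PCIe ports out of D3, while "pcie_port_pm=force"
 * allows D3 even for ports the kernel would otherwise keep powered.
 */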

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
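
/*
 * Example usage (illustrative sketch, not a caller in this file): a
 * driver's error handler might consume the returned PCI_STATUS bits:
 *
 *	int status = pci_status_get_and_clear_errors(pdev);
 *
 *	if (status < 0)
 *		return status;		-- config read failed
 *	if (status & PCI_STATUS_DETECTED_PARITY)
 *		dev_warn(&pdev->dev, "parity error detected\n");
 */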

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
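
/*
 * Example usage (illustrative sketch): in a probe() routine, after
 * pci_enable_device() and pci_request_regions() have succeeded, BAR 0
 * can be mapped with:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 */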

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused in kernel parameters. If the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device.  The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

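/*
 * Examples of accepted strings (illustrative; the IDs are arbitrary):
 *
 *	"0000:01:00.0"		- single bus/device/function address
 *	"01:00.0"		- same device, domain defaulting to 0
 *	"0000:00:1c.4/00.0"	- path form, robust against renumbering
 *	"pci:8086:1533"		- any device with this vendor/device ID
 */
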
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
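
/*
 * Example usage (illustrative sketch): locate the Power Management
 * capability and read its control/status register:
 *
 *	u16 pmcsr;
 *	int pos = pci_find_capability(dev, PCI_CAP_ID_PM);
 *
 *	if (pos)
 *		pci_read_config_word(dev, pos + PCI_PM_CTRL, &pmcsr);
 */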

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
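
/*
 * Example usage (illustrative sketch): because some extended capabilities
 * may appear more than once, all instances can be walked like this:
 *
 *	int pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(dev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		... handle the vendor-specific capability at 'pos' ...
 */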

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
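
/*
 * Example usage (illustrative sketch): a driver can use the DSN as a
 * stable identifier when the capability is present:
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (dsn)
 *		... the 64-bit serial number uniquely names the device ...
 */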

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

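/*
 * Example usage (illustrative sketch; this mirrors how
 * pci_wait_for_pending_transaction() uses the helper elsewhere in the
 * PCI core): poll the PCIe Device Status register until the Transaction
 * Pending bit clears:
 *
 *	pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
 *			     PCI_EXP_DEVSTA_TRPND);
 *
 * The loop sleeps 100, 200 and 400 ms between its four reads, so a
 * caller waits at most about 700 ms before failure is reported.
 */
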
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices */
	if (dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
	pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
	    !ops->choose_state  || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->bridge_d3)
		return pci_platform_pm->bridge_d3(dev);
	return false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.  E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (pmcsr == (u16) ~0) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		fallthrough;	/* force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

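/*
 * The polling above backs off exponentially: it sleeps 1, 2, 4, ... ms
 * between config reads, so 'delay - 1' is always the total time slept so
 * far.  Worked example (illustrative): with a 60000 ms timeout the last
 * sleep is 32768 ms, after which delay reaches 65536 and the loop gives
 * up having waited 65535 ms in total.
 */
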
/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 */
int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	/*
	 * Mandatory power management transition delays are handled in
	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
	 * corresponding bridge.
	 */
	if (dev->runtime_d3cold) {
		/*
		 * When powering on a bridge from D3cold, the whole hierarchy
		 * may be powered on into D0uninitialized state, resume them to
		 * give them a chance to suspend again
		 */
		pci_wakeup_bus(dev->subordinate);
	}

	return pci_raw_set_power_state(dev, PCI_D0);
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge does not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);
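
/*
 * Example usage (illustrative sketch of the common driver pattern): a
 * driver's suspend/resume hooks typically pair this with state save and
 * restore:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *	...
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */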
1356
1357/**
1358 * pci_choose_state - Choose the power state of a PCI device
1359 * @dev: PCI device to be suspended
1360 * @state: target sleep state for the whole system. This is the value
David Brazdil0f672f62019-12-10 10:32:29 +00001361 * that is passed to suspend() function.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001362 *
1363 * Returns PCI power state suitable for given device and given system
1364 * message.
1365 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001366pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
1367{
1368 pci_power_t ret;
1369
1370 if (!dev->pm_cap)
1371 return PCI_D0;
1372
1373 ret = platform_pci_choose_state(dev);
1374 if (ret != PCI_POWER_ERROR)
1375 return ret;
1376
1377 switch (state.event) {
1378 case PM_EVENT_ON:
1379 return PCI_D0;
1380 case PM_EVENT_FREEZE:
1381 case PM_EVENT_PRETHAW:
1382 /* REVISIT both freeze and pre-thaw "should" use D0 */
1383 case PM_EVENT_SUSPEND:
1384 case PM_EVENT_HIBERNATE:
1385 return PCI_D3hot;
1386 default:
1387 pci_info(dev, "unrecognized suspend event %d\n",
1388 state.event);
1389 BUG();
1390 }
1391 return PCI_D0;
1392}
1393EXPORT_SYMBOL(pci_choose_state);
1394
1395#define PCI_EXP_SAVE_REGS 7
1396
1397static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1398 u16 cap, bool extended)
1399{
1400 struct pci_cap_saved_state *tmp;
1401
1402 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1403 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1404 return tmp;
1405 }
1406 return NULL;
1407}
1408
1409struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1410{
1411 return _pci_find_saved_cap(dev, cap, false);
1412}
1413
1414struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1415{
1416 return _pci_find_saved_cap(dev, cap, true);
1417}
1418
1419static int pci_save_pcie_state(struct pci_dev *dev)
1420{
1421 int i = 0;
1422 struct pci_cap_saved_state *save_state;
1423 u16 *cap;
1424
1425 if (!pci_is_pcie(dev))
1426 return 0;
1427
1428 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1429 if (!save_state) {
1430 pci_err(dev, "buffer not found in %s\n", __func__);
1431 return -ENOMEM;
1432 }
1433
1434 cap = (u16 *)&save_state->cap.data[0];
1435 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1436 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1437 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1438 pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
1439 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1440 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1441 pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1442
1443 return 0;
1444}
1445
1446static void pci_restore_pcie_state(struct pci_dev *dev)
1447{
1448 int i = 0;
1449 struct pci_cap_saved_state *save_state;
1450 u16 *cap;
1451
1452 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1453 if (!save_state)
1454 return;
1455
1456 cap = (u16 *)&save_state->cap.data[0];
1457 pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1458 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1459 pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1460 pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1461 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1462 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1463 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1464}
1465
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001466static int pci_save_pcix_state(struct pci_dev *dev)
1467{
1468 int pos;
1469 struct pci_cap_saved_state *save_state;
1470
1471 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1472 if (!pos)
1473 return 0;
1474
1475 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1476 if (!save_state) {
1477 pci_err(dev, "buffer not found in %s\n", __func__);
1478 return -ENOMEM;
1479 }
1480
1481 pci_read_config_word(dev, pos + PCI_X_CMD,
1482 (u16 *)save_state->cap.data);
1483
1484 return 0;
1485}
1486
1487static void pci_restore_pcix_state(struct pci_dev *dev)
1488{
1489 int i = 0, pos;
1490 struct pci_cap_saved_state *save_state;
1491 u16 *cap;
1492
1493 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1494 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1495 if (!save_state || !pos)
1496 return;
1497 cap = (u16 *)&save_state->cap.data[0];
1498
1499 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1500}
1501
David Brazdil0f672f62019-12-10 10:32:29 +00001502static void pci_save_ltr_state(struct pci_dev *dev)
1503{
1504 int ltr;
1505 struct pci_cap_saved_state *save_state;
1506 u16 *cap;
1507
1508 if (!pci_is_pcie(dev))
1509 return;
1510
1511 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1512 if (!ltr)
1513 return;
1514
1515 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1516 if (!save_state) {
1517 pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1518 return;
1519 }
1520
1521 cap = (u16 *)&save_state->cap.data[0];
1522 pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
1523 pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
1524}
1525
1526static void pci_restore_ltr_state(struct pci_dev *dev)
1527{
1528 struct pci_cap_saved_state *save_state;
1529 int ltr;
1530 u16 *cap;
1531
1532 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1533 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1534 if (!save_state || !ltr)
1535 return;
1536
1537 cap = (u16 *)&save_state->cap.data[0];
1538 pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
1539 pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
1540}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001541
1542/**
David Brazdil0f672f62019-12-10 10:32:29 +00001543 * pci_save_state - save the PCI configuration space of a device before
1544 * suspending
1545 * @dev: PCI device that we're dealing with
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001546 */
1547int pci_save_state(struct pci_dev *dev)
1548{
1549 int i;
1550 /* XXX: 100% dword access ok here? */
Olivier Deprez157378f2022-04-04 15:47:50 +02001551 for (i = 0; i < 16; i++) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001552 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
Olivier Deprez157378f2022-04-04 15:47:50 +02001553 pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
1554 i * 4, dev->saved_config_space[i]);
1555 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001556 dev->state_saved = true;
1557
1558 i = pci_save_pcie_state(dev);
1559 if (i != 0)
1560 return i;
1561
1562 i = pci_save_pcix_state(dev);
1563 if (i != 0)
1564 return i;
1565
David Brazdil0f672f62019-12-10 10:32:29 +00001566 pci_save_ltr_state(dev);
1567 pci_save_dpc_state(dev);
Olivier Deprez157378f2022-04-04 15:47:50 +02001568 pci_save_aer_state(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001569 return pci_save_vc_state(dev);
1570}
1571EXPORT_SYMBOL(pci_save_state);
1572
1573static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1574 u32 saved_val, int retry, bool force)
1575{
1576 u32 val;
1577
1578 pci_read_config_dword(pdev, offset, &val);
1579 if (!force && val == saved_val)
1580 return;
1581
1582 for (;;) {
1583 pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1584 offset, val, saved_val);
1585 pci_write_config_dword(pdev, offset, saved_val);
1586 if (retry-- <= 0)
1587 return;
1588
1589 pci_read_config_dword(pdev, offset, &val);
1590 if (val == saved_val)
1591 return;
1592
1593 mdelay(1);
1594 }
1595}
1596
1597static void pci_restore_config_space_range(struct pci_dev *pdev,
1598 int start, int end, int retry,
1599 bool force)
1600{
1601 int index;
1602
1603 for (index = end; index >= start; index--)
1604 pci_restore_config_dword(pdev, 4 * index,
1605 pdev->saved_config_space[index],
1606 retry, force);
1607}
1608
1609static void pci_restore_config_space(struct pci_dev *pdev)
1610{
1611 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1612 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1613 /* Restore BARs before the command register. */
1614 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1615 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1616 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1617 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1618
1619 /*
1620 * Force rewriting of prefetch registers to avoid S3 resume
1621 * issues on Intel PCI bridges that occur when these
1622 * registers are not explicitly written.
1623 */
1624 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1625 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1626 } else {
1627 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1628 }
1629}
1630
1631static void pci_restore_rebar_state(struct pci_dev *pdev)
1632{
1633 unsigned int pos, nbars, i;
1634 u32 ctrl;
1635
1636 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1637 if (!pos)
1638 return;
1639
1640 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1641 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1642 PCI_REBAR_CTRL_NBAR_SHIFT;
1643
1644 for (i = 0; i < nbars; i++, pos += 8) {
1645 struct resource *res;
1646 int bar_idx, size;
1647
1648 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1649 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1650 res = pdev->resource + bar_idx;
David Brazdil0f672f62019-12-10 10:32:29 +00001651 size = ilog2(resource_size(res)) - 20;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001652 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1653 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1654 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1655 }
1656}
1657
1658/**
1659 * pci_restore_state - Restore the saved state of a PCI device
David Brazdil0f672f62019-12-10 10:32:29 +00001660 * @dev: PCI device that we're dealing with
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001661 */
1662void pci_restore_state(struct pci_dev *dev)
1663{
1664 if (!dev->state_saved)
1665 return;
1666
David Brazdil0f672f62019-12-10 10:32:29 +00001667 /*
1668 * Restore max latencies (in the LTR capability) before enabling
1669 * LTR itself (in the PCIe capability).
1670 */
1671 pci_restore_ltr_state(dev);
1672
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001673 pci_restore_pcie_state(dev);
1674 pci_restore_pasid_state(dev);
1675 pci_restore_pri_state(dev);
1676 pci_restore_ats_state(dev);
1677 pci_restore_vc_state(dev);
1678 pci_restore_rebar_state(dev);
David Brazdil0f672f62019-12-10 10:32:29 +00001679 pci_restore_dpc_state(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001680
Olivier Deprez157378f2022-04-04 15:47:50 +02001681 pci_aer_clear_status(dev);
1682 pci_restore_aer_state(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001683
1684 pci_restore_config_space(dev);
1685
1686 pci_restore_pcix_state(dev);
1687 pci_restore_msi_state(dev);
1688
1689 /* Restore ACS and IOV configuration state */
1690 pci_enable_acs(dev);
1691 pci_restore_iov_state(dev);
1692
1693 dev->state_saved = false;
1694}
1695EXPORT_SYMBOL(pci_restore_state);
1696
1697struct pci_saved_state {
1698 u32 config_space[16];
Olivier Deprez157378f2022-04-04 15:47:50 +02001699 struct pci_cap_saved_data cap[];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001700};
1701
1702/**
1703 * pci_store_saved_state - Allocate and return an opaque struct containing
1704 * the device saved state.
1705 * @dev: PCI device that we're dealing with
1706 *
1707 * Return NULL if no state or error.
1708 */
1709struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1710{
1711 struct pci_saved_state *state;
1712 struct pci_cap_saved_state *tmp;
1713 struct pci_cap_saved_data *cap;
1714 size_t size;
1715
1716 if (!dev->state_saved)
1717 return NULL;
1718
1719 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1720
1721 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1722 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1723
1724 state = kzalloc(size, GFP_KERNEL);
1725 if (!state)
1726 return NULL;
1727
1728 memcpy(state->config_space, dev->saved_config_space,
1729 sizeof(state->config_space));
1730
1731 cap = state->cap;
1732 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1733 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1734 memcpy(cap, &tmp->cap, len);
1735 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1736 }
1737 /* Empty cap_save terminates list */
1738
1739 return state;
1740}
1741EXPORT_SYMBOL_GPL(pci_store_saved_state);
1742
1743/**
1744 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1745 * @dev: PCI device that we're dealing with
1746 * @state: Saved state returned from pci_store_saved_state()
1747 */
1748int pci_load_saved_state(struct pci_dev *dev,
1749 struct pci_saved_state *state)
1750{
1751 struct pci_cap_saved_data *cap;
1752
1753 dev->state_saved = false;
1754
1755 if (!state)
1756 return 0;
1757
1758 memcpy(dev->saved_config_space, state->config_space,
1759 sizeof(state->config_space));
1760
1761 cap = state->cap;
1762 while (cap->size) {
1763 struct pci_cap_saved_state *tmp;
1764
1765 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1766 if (!tmp || tmp->cap.size != cap->size)
1767 return -EINVAL;
1768
1769 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1770 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1771 sizeof(struct pci_cap_saved_data) + cap->size);
1772 }
1773
1774 dev->state_saved = true;
1775 return 0;
1776}
1777EXPORT_SYMBOL_GPL(pci_load_saved_state);
1778
1779/**
1780 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1781 * and free the memory allocated for it.
1782 * @dev: PCI device that we're dealing with
1783 * @state: Pointer to saved state returned from pci_store_saved_state()
1784 */
1785int pci_load_and_free_saved_state(struct pci_dev *dev,
1786 struct pci_saved_state **state)
1787{
1788 int ret = pci_load_saved_state(dev, *state);
1789 kfree(*state);
1790 *state = NULL;
1791 return ret;
1792}
1793EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
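/*
 * Illustrative sketch: round-tripping config space through the opaque
 * pci_saved_state API above, e.g. around a device reset.  Error handling
 * is trimmed for brevity.
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);	/* kzalloc'd snapshot */
 *	... reset the device ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */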
1794
1795int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1796{
1797 return pci_enable_resources(dev, bars);
1798}
1799
1800static int do_pci_enable_device(struct pci_dev *dev, int bars)
1801{
1802 int err;
1803 struct pci_dev *bridge;
1804 u16 cmd;
1805 u8 pin;
1806
1807 err = pci_set_power_state(dev, PCI_D0);
1808 if (err < 0 && err != -EIO)
1809 return err;
1810
1811 bridge = pci_upstream_bridge(dev);
1812 if (bridge)
1813 pcie_aspm_powersave_config_link(bridge);
1814
1815 err = pcibios_enable_device(dev, bars);
1816 if (err < 0)
1817 return err;
1818 pci_fixup_device(pci_fixup_enable, dev);
1819
1820 if (dev->msi_enabled || dev->msix_enabled)
1821 return 0;
1822
1823 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1824 if (pin) {
1825 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1826 if (cmd & PCI_COMMAND_INTX_DISABLE)
1827 pci_write_config_word(dev, PCI_COMMAND,
1828 cmd & ~PCI_COMMAND_INTX_DISABLE);
1829 }
1830
1831 return 0;
1832}
1833
1834/**
1835 * pci_reenable_device - Resume abandoned device
1836 * @dev: PCI device to be resumed
1837 *
1838 * NOTE: This function is a backend of pci_default_resume() and is not supposed
1839 * to be called by normal code; write a proper resume handler and use it instead.
1840 */
1841int pci_reenable_device(struct pci_dev *dev)
1842{
1843 if (pci_is_enabled(dev))
1844 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1845 return 0;
1846}
1847EXPORT_SYMBOL(pci_reenable_device);
1848
1849static void pci_enable_bridge(struct pci_dev *dev)
1850{
1851 struct pci_dev *bridge;
1852 int retval;
1853
1854 bridge = pci_upstream_bridge(dev);
1855 if (bridge)
1856 pci_enable_bridge(bridge);
1857
1858 if (pci_is_enabled(dev)) {
1859 if (!dev->is_busmaster)
1860 pci_set_master(dev);
1861 return;
1862 }
1863
1864 retval = pci_enable_device(dev);
1865 if (retval)
1866 pci_err(dev, "Error enabling bridge (%d), continuing\n",
1867 retval);
1868 pci_set_master(dev);
1869}
1870
1871static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1872{
1873 struct pci_dev *bridge;
1874 int err;
1875 int i, bars = 0;
1876
1877 /*
1878 * Power state could be unknown at this point, either due to a fresh
1879 * boot or a device removal call. So get the current power state
1880 * so that things like MSI message writing will behave as expected
1881 * (e.g. if the device really is in D0 at enable time).
1882 */
1883	pci_update_current_state(dev, dev->current_state);
1884
1885 if (atomic_inc_return(&dev->enable_cnt) > 1)
1886 return 0; /* already enabled */
1887
1888 bridge = pci_upstream_bridge(dev);
1889 if (bridge)
1890 pci_enable_bridge(bridge);
1891
1892	/* Collect BARs matching @flags; only the SR-IOV ranges are skipped */
1893 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1894 if (dev->resource[i].flags & flags)
1895 bars |= (1 << i);
1896 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1897 if (dev->resource[i].flags & flags)
1898 bars |= (1 << i);
1899
1900 err = do_pci_enable_device(dev, bars);
1901 if (err < 0)
1902 atomic_dec(&dev->enable_cnt);
1903 return err;
1904}
1905
1906/**
1907 * pci_enable_device_io - Initialize a device for use with IO space
1908 * @dev: PCI device to be initialized
1909 *
1910 * Initialize device before it's used by a driver. Ask low-level code
1911 * to enable I/O resources. Wake up the device if it was suspended.
1912 * Beware, this function can fail.
1913 */
1914int pci_enable_device_io(struct pci_dev *dev)
1915{
1916 return pci_enable_device_flags(dev, IORESOURCE_IO);
1917}
1918EXPORT_SYMBOL(pci_enable_device_io);
1919
1920/**
1921 * pci_enable_device_mem - Initialize a device for use with Memory space
1922 * @dev: PCI device to be initialized
1923 *
1924 * Initialize device before it's used by a driver. Ask low-level code
1925 * to enable Memory resources. Wake up the device if it was suspended.
1926 * Beware, this function can fail.
1927 */
1928int pci_enable_device_mem(struct pci_dev *dev)
1929{
1930 return pci_enable_device_flags(dev, IORESOURCE_MEM);
1931}
1932EXPORT_SYMBOL(pci_enable_device_mem);
1933
1934/**
1935 * pci_enable_device - Initialize device before it's used by a driver.
1936 * @dev: PCI device to be initialized
1937 *
1938 * Initialize device before it's used by a driver. Ask low-level code
1939 * to enable I/O and memory. Wake up the device if it was suspended.
1940 * Beware, this function can fail.
1941 *
1942 * Note we don't actually enable the device many times if we call
1943 * this function repeatedly (we just increment the count).
1944 */
1945int pci_enable_device(struct pci_dev *dev)
1946{
1947 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1948}
1949EXPORT_SYMBOL(pci_enable_device);
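/*
 * Illustrative sketch: the classic bring-up sequence in a driver's probe()
 * ("foo" is a hypothetical driver).
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc)
 *			goto err_disable;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return rc;
 *	}
 */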
1950
1951/*
1952 * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
1953 * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
1954 * there's no need to track it separately. pci_devres is initialized
1955 * when a device is enabled using managed PCI device enable interface.
1956 */
1957struct pci_devres {
1958 unsigned int enabled:1;
1959 unsigned int pinned:1;
1960 unsigned int orig_intx:1;
1961 unsigned int restore_intx:1;
1962 unsigned int mwi:1;
1963 u32 region_mask;
1964};
1965
1966static void pcim_release(struct device *gendev, void *res)
1967{
1968 struct pci_dev *dev = to_pci_dev(gendev);
1969 struct pci_devres *this = res;
1970 int i;
1971
1972 if (dev->msi_enabled)
1973 pci_disable_msi(dev);
1974 if (dev->msix_enabled)
1975 pci_disable_msix(dev);
1976
1977 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1978 if (this->region_mask & (1 << i))
1979 pci_release_region(dev, i);
1980
1981 if (this->mwi)
1982 pci_clear_mwi(dev);
1983
1984 if (this->restore_intx)
1985 pci_intx(dev, this->orig_intx);
1986
1987 if (this->enabled && !this->pinned)
1988 pci_disable_device(dev);
1989}
1990
1991static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1992{
1993 struct pci_devres *dr, *new_dr;
1994
1995 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1996 if (dr)
1997 return dr;
1998
1999 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2000 if (!new_dr)
2001 return NULL;
2002 return devres_get(&pdev->dev, new_dr, NULL, NULL);
2003}
2004
2005static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2006{
2007 if (pci_is_managed(pdev))
2008 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2009 return NULL;
2010}
2011
2012/**
2013 * pcim_enable_device - Managed pci_enable_device()
2014 * @pdev: PCI device to be initialized
2015 *
2016 * Managed pci_enable_device().
2017 */
2018int pcim_enable_device(struct pci_dev *pdev)
2019{
2020 struct pci_devres *dr;
2021 int rc;
2022
2023 dr = get_pci_dr(pdev);
2024 if (unlikely(!dr))
2025 return -ENOMEM;
2026 if (dr->enabled)
2027 return 0;
2028
2029 rc = pci_enable_device(pdev);
2030 if (!rc) {
2031 pdev->is_managed = 1;
2032 dr->enabled = 1;
2033 }
2034 return rc;
2035}
2036EXPORT_SYMBOL(pcim_enable_device);
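/*
 * Illustrative sketch: with the managed variant, pcim_release() above
 * undoes everything on driver detach, so probe() shrinks to ("foo" is
 * hypothetical):
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		pci_set_master(pdev);
 *		return 0;	/* no matching disable needed */
 *	}
 */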
2037
2038/**
2039 * pcim_pin_device - Pin managed PCI device
2040 * @pdev: PCI device to pin
2041 *
2042 * Pin managed PCI device @pdev. Pinned device won't be disabled on
2043 * driver detach. @pdev must have been enabled with
2044 * pcim_enable_device().
2045 */
2046void pcim_pin_device(struct pci_dev *pdev)
2047{
2048 struct pci_devres *dr;
2049
2050 dr = find_pci_dr(pdev);
2051 WARN_ON(!dr || !dr->enabled);
2052 if (dr)
2053 dr->pinned = 1;
2054}
2055EXPORT_SYMBOL(pcim_pin_device);
2056
2057/**
2058 * pcibios_add_device - provide arch specific hooks when adding device dev
2059 * @dev: the PCI device being added
2060 *
2061 * Permits the platform to provide architecture specific functionality when
2062 * devices are added. This is the default implementation. Architecture
2063 * implementations can override this.
2064 */
2065int __weak pcibios_add_device(struct pci_dev *dev)
2066{
2067 return 0;
2068}
2069
2070/**
2071 * pcibios_release_device - provide arch specific hooks when releasing
2072 *			    device dev
2073 * @dev: the PCI device being released
2074 *
2075 * Permits the platform to provide architecture specific functionality when
2076 * devices are released. This is the default implementation. Architecture
2077 * implementations can override this.
2078 */
2079void __weak pcibios_release_device(struct pci_dev *dev) {}
2080
2081/**
2082 * pcibios_disable_device - disable arch specific PCI resources for device dev
2083 * @dev: the PCI device to disable
2084 *
2085 * Disables architecture specific PCI resources for the device. This
2086 * is the default implementation. Architecture implementations can
2087 * override this.
2088 */
2089void __weak pcibios_disable_device(struct pci_dev *dev) {}
2090
2091/**
2092 * pcibios_penalize_isa_irq - penalize an ISA IRQ
2093 * @irq: ISA IRQ to penalize
2094 * @active: IRQ active or not
2095 *
2096 * Permits the platform to provide architecture-specific functionality when
2097 * penalizing ISA IRQs. This is the default implementation. Architecture
2098 * implementations can override this.
2099 */
2100void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2101
2102static void do_pci_disable_device(struct pci_dev *dev)
2103{
2104 u16 pci_command;
2105
2106 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2107 if (pci_command & PCI_COMMAND_MASTER) {
2108 pci_command &= ~PCI_COMMAND_MASTER;
2109 pci_write_config_word(dev, PCI_COMMAND, pci_command);
2110 }
2111
2112 pcibios_disable_device(dev);
2113}
2114
2115/**
2116 * pci_disable_enabled_device - Disable device without updating enable_cnt
2117 * @dev: PCI device to disable
2118 *
2119 * NOTE: This function is a backend of PCI power management routines and is
2120 * not supposed to be called by drivers.
2121 */
2122void pci_disable_enabled_device(struct pci_dev *dev)
2123{
2124 if (pci_is_enabled(dev))
2125 do_pci_disable_device(dev);
2126}
2127
2128/**
2129 * pci_disable_device - Disable PCI device after use
2130 * @dev: PCI device to be disabled
2131 *
2132 * Signal to the system that the PCI device is not in use by the system
2133 * anymore. This only involves disabling PCI bus-mastering, if active.
2134 *
2135 * Note we don't actually disable the device until all callers of
2136 * pci_enable_device() have called pci_disable_device().
2137 */
2138void pci_disable_device(struct pci_dev *dev)
2139{
2140 struct pci_devres *dr;
2141
2142 dr = find_pci_dr(dev);
2143 if (dr)
2144 dr->enabled = 0;
2145
2146 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2147 "disabling already-disabled device");
2148
2149 if (atomic_dec_return(&dev->enable_cnt) != 0)
2150 return;
2151
2152 do_pci_disable_device(dev);
2153
2154 dev->is_busmaster = 0;
2155}
2156EXPORT_SYMBOL(pci_disable_device);
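/*
 * Illustrative sketch: teardown mirrors bring-up; because of the enable
 * count, the device is only really disabled when its last user calls this
 * ("foo" is hypothetical).
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_release_regions(pdev);
 *		pci_disable_device(pdev);
 *	}
 */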
2157
2158/**
2159 * pcibios_set_pcie_reset_state - set reset state for device dev
2160 * @dev: the PCIe device reset
2161 * @state: Reset state to enter into
2162 *
2163 * Set the PCIe reset state for the device. This is the default
2164 * implementation. Architecture implementations can override this.
2165 */
2166int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2167 enum pcie_reset_state state)
2168{
2169 return -EINVAL;
2170}
2171
2172/**
2173 * pci_set_pcie_reset_state - set reset state for device dev
2174 * @dev: the PCIe device reset
2175 * @state: Reset state to enter into
2176 *
2177 * Sets the PCI reset state for the device.
2178 */
2179int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2180{
2181 return pcibios_set_pcie_reset_state(dev, state);
2182}
2183EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2184
2185void pcie_clear_device_status(struct pci_dev *dev)
2186{
2187 u16 sta;
2188
2189 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2190 pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2191}
2192
2193/**
2194 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2195 * @dev: PCIe root port or event collector.
2196 */
2197void pcie_clear_root_pme_status(struct pci_dev *dev)
2198{
2199 pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2200}
2201
2202/**
2203 * pci_check_pme_status - Check if given device has generated PME.
2204 * @dev: Device to check.
2205 *
2206 * Check the PME status of the device and if set, clear it and clear PME enable
2207 * (if set). Return 'true' if PME status and PME enable were both set or
2208 * 'false' otherwise.
2209 */
2210bool pci_check_pme_status(struct pci_dev *dev)
2211{
2212 int pmcsr_pos;
2213 u16 pmcsr;
2214 bool ret = false;
2215
2216 if (!dev->pm_cap)
2217 return false;
2218
2219 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2220 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2221 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2222 return false;
2223
2224 /* Clear PME status. */
2225 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2226 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2227 /* Disable PME to avoid interrupt flood. */
2228 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2229 ret = true;
2230 }
2231
2232 pci_write_config_word(dev, pmcsr_pos, pmcsr);
2233
2234 return ret;
2235}
2236
2237/**
2238 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2239 * @dev: Device to handle.
2240 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2241 *
2242 * Check if @dev has generated PME and queue a resume request for it in that
2243 * case.
2244 */
2245static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2246{
2247 if (pme_poll_reset && dev->pme_poll)
2248 dev->pme_poll = false;
2249
2250 if (pci_check_pme_status(dev)) {
2251 pci_wakeup_event(dev);
2252 pm_request_resume(&dev->dev);
2253 }
2254 return 0;
2255}
2256
2257/**
2258 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2259 * @bus: Top bus of the subtree to walk.
2260 */
2261void pci_pme_wakeup_bus(struct pci_bus *bus)
2262{
2263 if (bus)
2264 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2265}
2266
2267
2268/**
2269 * pci_pme_capable - check the capability of PCI device to generate PME#
2270 * @dev: PCI device to handle.
2271 * @state: PCI state from which device will issue PME#.
2272 */
2273bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2274{
2275 if (!dev->pm_cap)
2276 return false;
2277
2278 return !!(dev->pme_support & (1 << state));
2279}
2280EXPORT_SYMBOL(pci_pme_capable);
2281
2282static void pci_pme_list_scan(struct work_struct *work)
2283{
2284 struct pci_pme_device *pme_dev, *n;
2285
2286 mutex_lock(&pci_pme_list_mutex);
2287 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2288 if (pme_dev->dev->pme_poll) {
2289 struct pci_dev *bridge;
2290
2291 bridge = pme_dev->dev->bus->self;
2292 /*
2293 * If bridge is in low power state, the
2294 * configuration space of subordinate devices
2295			 * may not be accessible
2296 */
2297 if (bridge && bridge->current_state != PCI_D0)
2298 continue;
2299			/*
2300 * If the device is in D3cold it should not be
2301 * polled either.
2302 */
2303 if (pme_dev->dev->current_state == PCI_D3cold)
2304 continue;
2305
2306			pci_pme_wakeup(pme_dev->dev, NULL);
2307 } else {
2308 list_del(&pme_dev->list);
2309 kfree(pme_dev);
2310 }
2311 }
2312 if (!list_empty(&pci_pme_list))
2313 queue_delayed_work(system_freezable_wq, &pci_pme_work,
2314 msecs_to_jiffies(PME_TIMEOUT));
2315 mutex_unlock(&pci_pme_list_mutex);
2316}
2317
2318static void __pci_pme_active(struct pci_dev *dev, bool enable)
2319{
2320 u16 pmcsr;
2321
2322 if (!dev->pme_support)
2323 return;
2324
2325 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2326 /* Clear PME_Status by writing 1 to it and enable PME# */
2327 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2328 if (!enable)
2329 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2330
2331 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2332}
2333
2334/**
2335 * pci_pme_restore - Restore PME configuration after config space restore.
2336 * @dev: PCI device to update.
2337 */
2338void pci_pme_restore(struct pci_dev *dev)
2339{
2340 u16 pmcsr;
2341
2342 if (!dev->pme_support)
2343 return;
2344
2345 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2346 if (dev->wakeup_prepared) {
2347 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2348 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2349 } else {
2350 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2351 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2352 }
2353 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2354}
2355
2356/**
2357 * pci_pme_active - enable or disable PCI device's PME# function
2358 * @dev: PCI device to handle.
2359 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2360 *
2361 * The caller must verify that the device is capable of generating PME# before
2362 * calling this function with @enable equal to 'true'.
2363 */
2364void pci_pme_active(struct pci_dev *dev, bool enable)
2365{
2366 __pci_pme_active(dev, enable);
2367
2368 /*
2369 * PCI (as opposed to PCIe) PME requires that the device have
2370 * its PME# line hooked up correctly. Not all hardware vendors
2371 * do this, so the PME never gets delivered and the device
2372 * remains asleep. The easiest way around this is to
2373 * periodically walk the list of suspended devices and check
2374 * whether any have their PME flag set. The assumption is that
2375 * we'll wake up often enough anyway that this won't be a huge
2376 * hit, and the power savings from the devices will still be a
2377 * win.
2378 *
2379 * Although PCIe uses in-band PME message instead of PME# line
2380 * to report PME, PME does not work for some PCIe devices in
2381 * reality. For example, there are devices that set their PME
2382 * status bits, but don't really bother to send a PME message;
2383 * there are PCI Express Root Ports that don't bother to
2384 * trigger interrupts when they receive PME messages from the
2385 * devices below. So PME poll is used for PCIe devices too.
2386 */
2387
2388 if (dev->pme_poll) {
2389 struct pci_pme_device *pme_dev;
2390 if (enable) {
2391 pme_dev = kmalloc(sizeof(struct pci_pme_device),
2392 GFP_KERNEL);
2393 if (!pme_dev) {
2394 pci_warn(dev, "can't enable PME#\n");
2395 return;
2396 }
2397 pme_dev->dev = dev;
2398 mutex_lock(&pci_pme_list_mutex);
2399 list_add(&pme_dev->list, &pci_pme_list);
2400 if (list_is_singular(&pci_pme_list))
2401 queue_delayed_work(system_freezable_wq,
2402 &pci_pme_work,
2403 msecs_to_jiffies(PME_TIMEOUT));
2404 mutex_unlock(&pci_pme_list_mutex);
2405 } else {
2406 mutex_lock(&pci_pme_list_mutex);
2407 list_for_each_entry(pme_dev, &pci_pme_list, list) {
2408 if (pme_dev->dev == dev) {
2409 list_del(&pme_dev->list);
2410 kfree(pme_dev);
2411 break;
2412 }
2413 }
2414 mutex_unlock(&pci_pme_list_mutex);
2415 }
2416 }
2417
2418 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2419}
2420EXPORT_SYMBOL(pci_pme_active);
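/*
 * Illustrative sketch: per the note above, callers are expected to gate
 * this on pci_pme_capable(), e.g.
 *
 *	if (pci_pme_capable(dev, PCI_D3hot))
 *		pci_pme_active(dev, true);
 */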
2421
2422/**
2423 * __pci_enable_wake - enable PCI device as wakeup event source
2424 * @dev: PCI device affected
2425 * @state: PCI state from which device will issue wakeup events
2426 * @enable: True to enable event generation; false to disable
2427 *
2428 * This enables the device as a wakeup event source, or disables it.
2429 * When such events involve platform-specific hooks, those hooks are
2430 * called automatically by this routine.
2431 *
2432 * Devices with legacy power management (no standard PCI PM capabilities)
2433 * always require such platform hooks.
2434 *
2435 * RETURN VALUE:
2436 * 0 is returned on success
2437 * -EINVAL is returned if device is not supposed to wake up the system
2438 * Error code depending on the platform is returned if both the platform and
2439 * the native mechanism fail to enable the generation of wake-up events
2440 */
2441static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2442{
2443 int ret = 0;
2444
2445 /*
2446	 * Bridges that are not power-manageable directly only signal
2447	 * wakeup on behalf of subordinate devices which is set up
2448	 * elsewhere, so skip them. However, bridges that are
2449	 * power-manageable may signal wakeup for themselves (for example,
2450	 * on a hotplug event) and they need to be covered here.
2451	 */
2452	if (!pci_power_manageable(dev))
2453		return 0;
2454
2455 /* Don't do the same thing twice in a row for one device. */
2456 if (!!enable == !!dev->wakeup_prepared)
2457 return 0;
2458
2459 /*
2460 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2461 * Anderson we should be doing PME# wake enable followed by ACPI wake
2462 * enable. To disable wake-up we call the platform first, for symmetry.
2463 */
2464
2465 if (enable) {
2466 int error;
2467
2468		/*
2469		 * Enable PME signaling if the device can signal PME from
2470		 * D3cold regardless of whether or not it can signal PME from
2471		 * the current target state, because that will allow it to
2472		 * signal PME when the hierarchy above it goes into D3cold and
2473		 * the device itself ends up in D3cold as a result of that.
2474		 */
2475		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2476			pci_pme_active(dev, true);
2477 else
2478 ret = 1;
2479 error = platform_pci_set_wakeup(dev, true);
2480 if (ret)
2481 ret = error;
2482 if (!ret)
2483 dev->wakeup_prepared = true;
2484 } else {
2485 platform_pci_set_wakeup(dev, false);
2486 pci_pme_active(dev, false);
2487 dev->wakeup_prepared = false;
2488 }
2489
2490 return ret;
2491}
2492
2493/**
2494 * pci_enable_wake - change wakeup settings for a PCI device
2495 * @pci_dev: Target device
2496 * @state: PCI state from which device will issue wakeup events
2497 * @enable: Whether or not to enable event generation
2498 *
2499 * If @enable is set, check device_may_wakeup() for the device before calling
2500 * __pci_enable_wake() for it.
2501 */
2502int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2503{
2504 if (enable && !device_may_wakeup(&pci_dev->dev))
2505 return -EINVAL;
2506
2507 return __pci_enable_wake(pci_dev, state, enable);
2508}
2509EXPORT_SYMBOL(pci_enable_wake);
2510
2511/**
2512 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2513 * @dev: PCI device to prepare
2514 * @enable: True to enable wake-up event generation; false to disable
2515 *
2516 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2517 * and this function allows them to set that up cleanly - pci_enable_wake()
2518 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2519 * ordering constraints.
2520 *
2521 * This function only returns error code if the device is not allowed to wake
2522 * up the system from sleep or it is not capable of generating PME# from both
2523 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2524 */
2525int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2526{
2527 return pci_pme_capable(dev, PCI_D3cold) ?
2528 pci_enable_wake(dev, PCI_D3cold, enable) :
2529 pci_enable_wake(dev, PCI_D3hot, enable);
2530}
2531EXPORT_SYMBOL(pci_wake_from_d3);
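/*
 * Illustrative sketch: a network driver arming Wake-on-LAN from its
 * suspend path ("foo" and its WoL helper are hypothetical).
 *
 *	static int __maybe_unused foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		foo_enable_wol_filters(pdev);
 *		return pci_wake_from_d3(pdev, device_may_wakeup(dev));
 *	}
 */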
2532
2533/**
2534 * pci_target_state - find an appropriate low power state for a given PCI dev
2535 * @dev: PCI device
2536 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2537 *
2538 * Use underlying platform code to find a supported low power state for @dev.
2539 * If the platform can't manage @dev, return the deepest state from which it
2540 * can generate wake events, based on any available PME info.
2541 */
2542static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2543{
2544 pci_power_t target_state = PCI_D3hot;
2545
2546 if (platform_pci_power_manageable(dev)) {
2547 /*
2548 * Call the platform to find the target state for the device.
2549 */
2550 pci_power_t state = platform_pci_choose_state(dev);
2551
2552 switch (state) {
2553 case PCI_POWER_ERROR:
2554 case PCI_UNKNOWN:
2555 break;
2556 case PCI_D1:
2557 case PCI_D2:
2558 if (pci_no_d1d2(dev))
2559 break;
2560			fallthrough;
2561		default:
2562 target_state = state;
2563 }
2564
2565 return target_state;
2566 }
2567
2568 if (!dev->pm_cap)
2569 target_state = PCI_D0;
2570
2571 /*
2572 * If the device is in D3cold even though it's not power-manageable by
2573 * the platform, it may have been powered down by non-standard means.
2574 * Best to let it slumber.
2575 */
2576 if (dev->current_state == PCI_D3cold)
2577 target_state = PCI_D3cold;
2578
2579	if (wakeup && dev->pme_support) {
2580		pci_power_t state = target_state;
2581
2582		/*
2583		 * Find the deepest state from which the device can generate
2584		 * PME#.
2585		 */
2586		while (state && !(dev->pme_support & (1 << state)))
2587			state--;
2588
2589		if (state)
2590			return state;
2591		else if (dev->pme_support & 1)
2592			return PCI_D0;
2593	}
2594
2595 return target_state;
2596}
2597
2598/**
2599 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2600 *			  into a sleep state
2601 * @dev: Device to handle.
2602 *
2603 * Choose the power state appropriate for the device depending on whether
2604 * it can wake up the system and/or is power manageable by the platform
2605 * (PCI_D3hot is the default) and put the device into that state.
2606 */
2607int pci_prepare_to_sleep(struct pci_dev *dev)
2608{
2609 bool wakeup = device_may_wakeup(&dev->dev);
2610 pci_power_t target_state = pci_target_state(dev, wakeup);
2611 int error;
2612
2613 if (target_state == PCI_POWER_ERROR)
2614 return -EIO;
2615
2616 pci_enable_wake(dev, target_state, wakeup);
2617
2618 error = pci_set_power_state(dev, target_state);
2619
2620 if (error)
2621 pci_enable_wake(dev, target_state, false);
2622
2623 return error;
2624}
2625EXPORT_SYMBOL(pci_prepare_to_sleep);
2626
2627/**
2628 * pci_back_from_sleep - turn PCI device on during system-wide transition
2629 *			 into working state
2630 * @dev: Device to handle.
2631 *
2632 * Disable device's system wake-up capability and put it into D0.
2633 */
2634int pci_back_from_sleep(struct pci_dev *dev)
2635{
2636 pci_enable_wake(dev, PCI_D0, false);
2637 return pci_set_power_state(dev, PCI_D0);
2638}
2639EXPORT_SYMBOL(pci_back_from_sleep);
2640
2641/**
2642 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2643 * @dev: PCI device being suspended.
2644 *
2645 * Prepare @dev to generate wake-up events at run time and put it into a low
2646 * power state.
2647 */
2648int pci_finish_runtime_suspend(struct pci_dev *dev)
2649{
2650 pci_power_t target_state;
2651 int error;
2652
2653 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2654 if (target_state == PCI_POWER_ERROR)
2655 return -EIO;
2656
2657 dev->runtime_d3cold = target_state == PCI_D3cold;
2658
2659 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2660
2661 error = pci_set_power_state(dev, target_state);
2662
2663 if (error) {
2664 pci_enable_wake(dev, target_state, false);
2665 dev->runtime_d3cold = false;
2666 }
2667
2668 return error;
2669}
2670
2671/**
2672 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2673 * @dev: Device to check.
2674 *
2675 * Return true if the device itself is capable of generating wake-up events
2676 * (through the platform or using the native PCIe PME) or if the device supports
2677 * PME and one of its upstream bridges can generate wake-up events.
2678 */
2679bool pci_dev_run_wake(struct pci_dev *dev)
2680{
2681 struct pci_bus *bus = dev->bus;
2682
2683 if (!dev->pme_support)
2684 return false;
2685
2686 /* PME-capable in principle, but not from the target power state */
2687 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2688 return false;
2689
2690 if (device_can_wakeup(&dev->dev))
2691 return true;
2692
2693 while (bus->parent) {
2694 struct pci_dev *bridge = bus->self;
2695
2696 if (device_can_wakeup(&bridge->dev))
2697 return true;
2698
2699 bus = bus->parent;
2700 }
2701
2702 /* We have reached the root bus. */
2703 if (bus->bridge)
2704 return device_can_wakeup(bus->bridge);
2705
2706 return false;
2707}
2708EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2709
2710/**
2711 * pci_dev_need_resume - Check if it is necessary to resume the device.
2712 * @pci_dev: Device to check.
2713 *
2714 * Return 'true' if the device is not runtime-suspended or it has to be
2715 * reconfigured due to a difference in wakeup settings between system and
2716 * runtime suspend, or if its current power state is not suitable for the
2717 * upcoming (system-wide) transition.
2718 */
2719bool pci_dev_need_resume(struct pci_dev *pci_dev)
2720{
2721	struct device *dev = &pci_dev->dev;
2722	pci_power_t target_state;
2723
2724	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2725		return true;
2726
2727	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2728
2729	/*
2730	 * If the earlier platform check has not triggered, D3cold is just power
2731	 * removal on top of D3hot, so no need to resume the device in that
2732	 * case.
2733	 */
2734	return target_state != pci_dev->current_state &&
2735 target_state != PCI_D3cold &&
2736 pci_dev->current_state != PCI_D3hot;
2737}
2738
2739/**
2740 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2741 * @pci_dev: Device to check.
2742 *
2743 * If the device is suspended and it is not configured for system wakeup,
2744 * disable PME for it to prevent it from waking up the system unnecessarily.
2745 *
2746 * Note that if the device's power state is D3cold and the platform check in
2747 * pci_dev_need_resume() has not triggered, the device's configuration need not
2748 * be changed.
2749 */
2750void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2751{
2752 struct device *dev = &pci_dev->dev;
2753
2754	spin_lock_irq(&dev->power.lock);
2755
2756	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2757	    pci_dev->current_state < PCI_D3cold)
2758		__pci_pme_active(pci_dev, false);
2759
2760	spin_unlock_irq(&dev->power.lock);
2761}
2762
2763/**
2764 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2765 * @pci_dev: Device to handle.
2766 *
2767 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2768 * it might have been disabled during the prepare phase of system suspend if
2769 * the device was not configured for system wakeup.
2770 */
2771void pci_dev_complete_resume(struct pci_dev *pci_dev)
2772{
2773 struct device *dev = &pci_dev->dev;
2774
2775 if (!pci_dev_run_wake(pci_dev))
2776 return;
2777
2778 spin_lock_irq(&dev->power.lock);
2779
2780 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2781 __pci_pme_active(pci_dev, true);
2782
2783 spin_unlock_irq(&dev->power.lock);
2784}
2785
2786void pci_config_pm_runtime_get(struct pci_dev *pdev)
2787{
2788 struct device *dev = &pdev->dev;
2789 struct device *parent = dev->parent;
2790
2791 if (parent)
2792 pm_runtime_get_sync(parent);
2793 pm_runtime_get_noresume(dev);
2794 /*
2795 * pdev->current_state is set to PCI_D3cold during suspending,
2796 * so wait until suspending completes
2797 */
2798 pm_runtime_barrier(dev);
2799 /*
2800 * Only need to resume devices in D3cold, because config
2801 * registers are still accessible for devices suspended but
2802 * not in D3cold.
2803 */
2804 if (pdev->current_state == PCI_D3cold)
2805 pm_runtime_resume(dev);
2806}
2807
2808void pci_config_pm_runtime_put(struct pci_dev *pdev)
2809{
2810 struct device *dev = &pdev->dev;
2811 struct device *parent = dev->parent;
2812
2813 pm_runtime_put(dev);
2814 if (parent)
2815 pm_runtime_put_sync(parent);
2816}
2817
2818static const struct dmi_system_id bridge_d3_blacklist[] = {
2819#ifdef CONFIG_X86
2820 {
2821 /*
2822 * Gigabyte X299 root port is not marked as hotplug capable
2823 * which allows Linux to power manage it. However, this
2824 * confuses the BIOS SMI handler so don't power manage root
2825 * ports on that system.
2826 */
2827 .ident = "X299 DESIGNARE EX-CF",
2828 .matches = {
2829 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2830 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2831 },
2832 },
2833	{
2834 /*
2835 * Downstream device is not accessible after putting a root port
2836 * into D3cold and back into D0 on Elo i2.
2837 */
2838 .ident = "Elo i2",
2839 .matches = {
2840 DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
2841 DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
2842 DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
2843 },
2844 },
2845#endif
2846 { }
2847};
2848
2849/**
2850 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2851 * @bridge: Bridge to check
2852 *
2853 * This function checks if it is possible to move the bridge to D3.
2854 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2855 */
2856bool pci_bridge_d3_possible(struct pci_dev *bridge)
2857{
2858 if (!pci_is_pcie(bridge))
2859 return false;
2860
2861 switch (pci_pcie_type(bridge)) {
2862 case PCI_EXP_TYPE_ROOT_PORT:
2863 case PCI_EXP_TYPE_UPSTREAM:
2864 case PCI_EXP_TYPE_DOWNSTREAM:
2865 if (pci_bridge_d3_disable)
2866 return false;
2867
2868 /*
2869 * Hotplug ports handled by firmware in System Management Mode
2870 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2871 */
2872 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2873 return false;
2874
2875 if (pci_bridge_d3_force)
2876 return true;
2877
2878 /* Even the oldest 2010 Thunderbolt controller supports D3. */
2879 if (bridge->is_thunderbolt)
2880 return true;
2881
2882		/* Platform might know better if the bridge supports D3 */
2883		if (platform_pci_bridge_d3(bridge))
2884			return true;
2885
2886		/*
2887		 * Hotplug ports handled natively by the OS were not validated
2888		 * by vendors for runtime D3 at least until 2018 because there
2889		 * was no OS support.
2890		 */
2891		if (bridge->is_hotplug_bridge)
2892			return false;
2893
2894		if (dmi_check_system(bridge_d3_blacklist))
2895			return false;
2896
2897		/*
2898 * It should be safe to put PCIe ports from 2015 or newer
2899 * to D3.
2900 */
2901 if (dmi_get_bios_year() >= 2015)
2902 return true;
2903 break;
2904 }
2905
2906 return false;
2907}
2908
2909static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2910{
2911 bool *d3cold_ok = data;
2912
2913 if (/* The device needs to be allowed to go D3cold ... */
2914 dev->no_d3cold || !dev->d3cold_allowed ||
2915
2916 /* ... and if it is wakeup capable to do so from D3cold. */
2917 (device_may_wakeup(&dev->dev) &&
2918 !pci_pme_capable(dev, PCI_D3cold)) ||
2919
2920 /* If it is a bridge it must be allowed to go to D3. */
2921 !pci_power_manageable(dev))
2922
2923 *d3cold_ok = false;
2924
2925 return !*d3cold_ok;
2926}
2927
2928/**
2929 * pci_bridge_d3_update - Update bridge D3 capabilities
2930 * @dev: PCI device which is changed
2931 *
2932 * Update upstream bridge PM capabilities accordingly depending on if the
2933 * device PM configuration was changed or the device is being removed. The
2934 * change is also propagated upstream.
2935 */
2936void pci_bridge_d3_update(struct pci_dev *dev)
2937{
2938 bool remove = !device_is_registered(&dev->dev);
2939 struct pci_dev *bridge;
2940 bool d3cold_ok = true;
2941
2942 bridge = pci_upstream_bridge(dev);
2943 if (!bridge || !pci_bridge_d3_possible(bridge))
2944 return;
2945
2946 /*
2947 * If D3 is currently allowed for the bridge, removing one of its
2948 * children won't change that.
2949 */
2950 if (remove && bridge->bridge_d3)
2951 return;
2952
2953 /*
2954 * If D3 is currently allowed for the bridge and a child is added or
2955 * changed, disallowance of D3 can only be caused by that child, so
2956 * we only need to check that single device, not any of its siblings.
2957 *
2958 * If D3 is currently not allowed for the bridge, checking the device
2959 * first may allow us to skip checking its siblings.
2960 */
2961 if (!remove)
2962 pci_dev_check_d3cold(dev, &d3cold_ok);
2963
2964 /*
2965 * If D3 is currently not allowed for the bridge, this may be caused
2966 * either by the device being changed/removed or any of its siblings,
2967 * so we need to go through all children to find out if one of them
2968 * continues to block D3.
2969 */
2970 if (d3cold_ok && !bridge->bridge_d3)
2971 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2972 &d3cold_ok);
2973
2974 if (bridge->bridge_d3 != d3cold_ok) {
2975 bridge->bridge_d3 = d3cold_ok;
2976 /* Propagate change to upstream bridges */
2977 pci_bridge_d3_update(bridge);
2978 }
2979}
2980
2981/**
2982 * pci_d3cold_enable - Enable D3cold for device
2983 * @dev: PCI device to handle
2984 *
2985 * This function can be used in drivers to enable D3cold from the device
2986 * they handle. It also updates upstream PCI bridge PM capabilities
2987 * accordingly.
2988 */
2989void pci_d3cold_enable(struct pci_dev *dev)
2990{
2991 if (dev->no_d3cold) {
2992 dev->no_d3cold = false;
2993 pci_bridge_d3_update(dev);
2994 }
2995}
2996EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2997
2998/**
2999 * pci_d3cold_disable - Disable D3cold for device
3000 * @dev: PCI device to handle
3001 *
3002 * This function can be used in drivers to disable D3cold from the device
3003 * they handle. It also updates upstream PCI bridge PM capabilities
3004 * accordingly.
3005 */
3006void pci_d3cold_disable(struct pci_dev *dev)
3007{
3008 if (!dev->no_d3cold) {
3009 dev->no_d3cold = true;
3010 pci_bridge_d3_update(dev);
3011 }
3012}
3013EXPORT_SYMBOL_GPL(pci_d3cold_disable);
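/*
 * Illustrative sketch: a driver whose device is known to misbehave after
 * power removal can veto D3cold from its probe() path:
 *
 *	pci_d3cold_disable(pdev);	/* sets no_d3cold, updates bridges */
 */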
3014
3015/**
3016 * pci_pm_init - Initialize PM functions of given PCI device
3017 * @dev: PCI device to handle.
3018 */
3019void pci_pm_init(struct pci_dev *dev)
3020{
3021 int pm;
3022	u16 status;
3023	u16 pmc;
3024
3025 pm_runtime_forbid(&dev->dev);
3026 pm_runtime_set_active(&dev->dev);
3027 pm_runtime_enable(&dev->dev);
3028 device_enable_async_suspend(&dev->dev);
3029 dev->wakeup_prepared = false;
3030
3031 dev->pm_cap = 0;
3032 dev->pme_support = 0;
3033
3034 /* find PCI PM capability in list */
3035 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3036 if (!pm)
3037 return;
3038 /* Check device's ability to generate PME# */
3039 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3040
3041 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3042 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3043 pmc & PCI_PM_CAP_VER_MASK);
3044 return;
3045 }
3046
3047 dev->pm_cap = pm;
3048	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3049	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3050 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3051 dev->d3cold_allowed = true;
3052
3053 dev->d1_support = false;
3054 dev->d2_support = false;
3055 if (!pci_no_d1d2(dev)) {
3056 if (pmc & PCI_PM_CAP_D1)
3057 dev->d1_support = true;
3058 if (pmc & PCI_PM_CAP_D2)
3059 dev->d2_support = true;
3060
3061 if (dev->d1_support || dev->d2_support)
3062			pci_info(dev, "supports%s%s\n",
3063				 dev->d1_support ? " D1" : "",
3064 dev->d2_support ? " D2" : "");
3065 }
3066
3067 pmc &= PCI_PM_CAP_PME_MASK;
3068 if (pmc) {
3069		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3070			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3071			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3072			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3073			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3074			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3075 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3076 dev->pme_poll = true;
3077 /*
3078 * Make device's PM flags reflect the wake-up capability, but
3079 * let the user space enable it to wake up the system as needed.
3080 */
3081 device_set_wakeup_capable(&dev->dev, true);
3082 /* Disable the PME# generation functionality */
3083 pci_pme_active(dev, false);
3084 }
3085
3086	pci_read_config_word(dev, PCI_STATUS, &status);
3087	if (status & PCI_STATUS_IMM_READY)
3088		dev->imm_ready = 1;
3089}
3090
3091static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3092{
3093 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3094
3095 switch (prop) {
3096 case PCI_EA_P_MEM:
3097 case PCI_EA_P_VF_MEM:
3098 flags |= IORESOURCE_MEM;
3099 break;
3100 case PCI_EA_P_MEM_PREFETCH:
3101 case PCI_EA_P_VF_MEM_PREFETCH:
3102 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3103 break;
3104 case PCI_EA_P_IO:
3105 flags |= IORESOURCE_IO;
3106 break;
3107 default:
3108 return 0;
3109 }
3110
3111 return flags;
3112}
3113
3114static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3115 u8 prop)
3116{
3117 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3118 return &dev->resource[bei];
3119#ifdef CONFIG_PCI_IOV
3120 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3121 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3122 return &dev->resource[PCI_IOV_RESOURCES +
3123 bei - PCI_EA_BEI_VF_BAR0];
3124#endif
3125 else if (bei == PCI_EA_BEI_ROM)
3126 return &dev->resource[PCI_ROM_RESOURCE];
3127 else
3128 return NULL;
3129}
3130
3131/* Read an Enhanced Allocation (EA) entry */
3132static int pci_ea_read(struct pci_dev *dev, int offset)
3133{
3134 struct resource *res;
3135 int ent_size, ent_offset = offset;
3136 resource_size_t start, end;
3137 unsigned long flags;
3138 u32 dw0, bei, base, max_offset;
3139 u8 prop;
3140 bool support_64 = (sizeof(resource_size_t) >= 8);
3141
3142 pci_read_config_dword(dev, ent_offset, &dw0);
3143 ent_offset += 4;
3144
3145 /* Entry size field indicates DWORDs after 1st */
3146 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3147
3148 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3149 goto out;
3150
3151 bei = (dw0 & PCI_EA_BEI) >> 4;
3152 prop = (dw0 & PCI_EA_PP) >> 8;
3153
3154 /*
3155 * If the Property is in the reserved range, try the Secondary
3156 * Property instead.
3157 */
3158 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3159 prop = (dw0 & PCI_EA_SP) >> 16;
3160 if (prop > PCI_EA_P_BRIDGE_IO)
3161 goto out;
3162
3163 res = pci_ea_get_resource(dev, bei, prop);
3164 if (!res) {
3165 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3166 goto out;
3167 }
3168
3169 flags = pci_ea_flags(dev, prop);
3170 if (!flags) {
3171 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3172 goto out;
3173 }
3174
3175 /* Read Base */
3176 pci_read_config_dword(dev, ent_offset, &base);
3177 start = (base & PCI_EA_FIELD_MASK);
3178 ent_offset += 4;
3179
3180 /* Read MaxOffset */
3181 pci_read_config_dword(dev, ent_offset, &max_offset);
3182 ent_offset += 4;
3183
3184 /* Read Base MSBs (if 64-bit entry) */
3185 if (base & PCI_EA_IS_64) {
3186 u32 base_upper;
3187
3188 pci_read_config_dword(dev, ent_offset, &base_upper);
3189 ent_offset += 4;
3190
3191 flags |= IORESOURCE_MEM_64;
3192
3193 /* entry starts above 32-bit boundary, can't use */
3194 if (!support_64 && base_upper)
3195 goto out;
3196
3197 if (support_64)
3198 start |= ((u64)base_upper << 32);
3199 }
3200
3201 end = start + (max_offset | 0x03);
3202
3203 /* Read MaxOffset MSBs (if 64-bit entry) */
3204 if (max_offset & PCI_EA_IS_64) {
3205 u32 max_offset_upper;
3206
3207 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3208 ent_offset += 4;
3209
3210 flags |= IORESOURCE_MEM_64;
3211
3212 /* entry too big, can't use */
3213 if (!support_64 && max_offset_upper)
3214 goto out;
3215
3216 if (support_64)
3217 end += ((u64)max_offset_upper << 32);
3218 }
3219
3220 if (end < start) {
3221 pci_err(dev, "EA Entry crosses address boundary\n");
3222 goto out;
3223 }
3224
3225 if (ent_size != ent_offset - offset) {
3226 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3227 ent_size, ent_offset - offset);
3228 goto out;
3229 }
3230
3231 res->name = pci_name(dev);
3232 res->start = start;
3233 res->end = end;
3234 res->flags = flags;
3235
3236 if (bei <= PCI_EA_BEI_BAR5)
3237		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3238			 bei, res, prop);
3239	else if (bei == PCI_EA_BEI_ROM)
3240		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3241			 res, prop);
3242	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3243		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3244			 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3245	else
3246		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3247			 bei, res, prop);
3248
3249out:
3250 return offset + ent_size;
3251}
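/*
 * Worked example with hypothetical register values: for a 32-bit entry
 * whose Base dword reads 0x90000000 (low bits clear), start = 0x90000000;
 * a MaxOffset dword of 0x000ffffc then gives
 * end = 0x90000000 + (0x000ffffc | 0x03) = 0x900fffff,
 * i.e. a 1 MB region at 0x90000000.
 */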
3252
3253/* Enhanced Allocation Initialization */
3254void pci_ea_init(struct pci_dev *dev)
3255{
3256 int ea;
3257 u8 num_ent;
3258 int offset;
3259 int i;
3260
3261 /* find PCI EA capability in list */
3262 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3263 if (!ea)
3264 return;
3265
3266 /* determine the number of entries */
3267 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3268 &num_ent);
3269 num_ent &= PCI_EA_NUM_ENT_MASK;
3270
3271 offset = ea + PCI_EA_FIRST_ENT;
3272
3273 /* Skip DWORD 2 for type 1 functions */
3274 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3275 offset += 4;
3276
3277 /* parse each EA entry */
3278 for (i = 0; i < num_ent; ++i)
3279 offset = pci_ea_read(dev, offset);
3280}
3281
3282static void pci_add_saved_cap(struct pci_dev *pci_dev,
3283 struct pci_cap_saved_state *new_cap)
3284{
3285 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3286}
3287
3288/**
3289 * _pci_add_cap_save_buffer - allocate buffer for saving given
3290 *			    capability registers
3291 * @dev: the PCI device
3292 * @cap: the capability to allocate the buffer for
3293 * @extended: Standard or Extended capability ID
3294 * @size: requested size of the buffer
3295 */
3296static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3297 bool extended, unsigned int size)
3298{
3299 int pos;
3300 struct pci_cap_saved_state *save_state;
3301
3302 if (extended)
3303 pos = pci_find_ext_capability(dev, cap);
3304 else
3305 pos = pci_find_capability(dev, cap);
3306
3307 if (!pos)
3308 return 0;
3309
3310 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3311 if (!save_state)
3312 return -ENOMEM;
3313
3314 save_state->cap.cap_nr = cap;
3315 save_state->cap.cap_extended = extended;
3316 save_state->cap.size = size;
3317 pci_add_saved_cap(dev, save_state);
3318
3319 return 0;
3320}
3321
3322int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3323{
3324 return _pci_add_cap_save_buffer(dev, cap, false, size);
3325}
3326
3327int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3328{
3329 return _pci_add_cap_save_buffer(dev, cap, true, size);
3330}
3331
3332/**
3333 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3334 * @dev: the PCI device
3335 */
3336void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3337{
3338 int error;
3339
3340 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3341 PCI_EXP_SAVE_REGS * sizeof(u16));
3342 if (error)
3343 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3344
3345 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3346 if (error)
3347 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3348
3349	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3350					    2 * sizeof(u16));
3351	if (error)
3352		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3353
3354	pci_allocate_vc_save_buffers(dev);
3355}
3356
3357void pci_free_cap_save_buffers(struct pci_dev *dev)
3358{
3359 struct pci_cap_saved_state *tmp;
3360 struct hlist_node *n;
3361
3362 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3363 kfree(tmp);
3364}
3365
3366/**
3367 * pci_configure_ari - enable or disable ARI forwarding
3368 * @dev: the PCI device
3369 *
3370 * If @dev and its upstream bridge both support ARI, enable ARI in the
3371 * bridge. Otherwise, disable ARI in the bridge.
3372 */
3373void pci_configure_ari(struct pci_dev *dev)
3374{
3375 u32 cap;
3376 struct pci_dev *bridge;
3377
3378 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3379 return;
3380
3381 bridge = dev->bus->self;
3382 if (!bridge)
3383 return;
3384
3385 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3386 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3387 return;
3388
3389 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3390 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3391 PCI_EXP_DEVCTL2_ARI);
3392 bridge->ari_enabled = 1;
3393 } else {
3394 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3395 PCI_EXP_DEVCTL2_ARI);
3396 bridge->ari_enabled = 0;
3397 }
3398}
3399
3400static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3401{
3402 int pos;
3403 u16 cap, ctrl;
3404
3405	pos = pdev->acs_cap;
3406	if (!pos)
3407 return false;
3408
3409 /*
3410 * Except for egress control, capabilities are either required
3411 * or only required if controllable. Features missing from the
3412 * capability field can therefore be assumed to be hard-wired enabled.
3413 */
3414 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3415 acs_flags &= (cap | PCI_ACS_EC);
3416
3417 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3418 return (ctrl & acs_flags) == acs_flags;
3419}
3420
3421/**
3422 * pci_acs_enabled - test ACS against required flags for a given device
3423 * @pdev: device to test
3424 * @acs_flags: required PCI ACS flags
3425 *
3426 * Return true if the device supports the provided flags. Automatically
3427 * filters out flags that are not implemented on multifunction devices.
3428 *
3429 * Note that this interface checks the effective ACS capabilities of the
3430 * device rather than the actual capabilities. For instance, most single
3431 * function endpoints are not required to support ACS because they have no
3432 * opportunity for peer-to-peer access. We therefore return 'true'
3433 * regardless of whether the device exposes an ACS capability. This makes
3434 * it much easier for callers of this function to ignore the actual type
3435 * or topology of the device when testing ACS support.
3436 */
3437bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3438{
3439 int ret;
3440
3441 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3442 if (ret >= 0)
3443 return ret > 0;
3444
3445 /*
3446 * Conventional PCI and PCI-X devices never support ACS, either
3447 * effectively or actually. The shared bus topology implies that
3448 * any device on the bus can receive or snoop DMA.
3449 */
3450 if (!pci_is_pcie(pdev))
3451 return false;
3452
3453 switch (pci_pcie_type(pdev)) {
3454 /*
3455 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3456 * but since their primary interface is PCI/X, we conservatively
3457 * handle them as we would a non-PCIe device.
3458 */
3459 case PCI_EXP_TYPE_PCIE_BRIDGE:
3460 /*
3461 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
3462 * applicable... must never implement an ACS Extended Capability...".
3463 * This seems arbitrary, but we take a conservative interpretation
3464 * of this statement.
3465 */
3466 case PCI_EXP_TYPE_PCI_BRIDGE:
3467 case PCI_EXP_TYPE_RC_EC:
3468 return false;
3469 /*
3470 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3471 * implement ACS in order to indicate their peer-to-peer capabilities,
3472 * regardless of whether they are single- or multi-function devices.
3473 */
3474 case PCI_EXP_TYPE_DOWNSTREAM:
3475 case PCI_EXP_TYPE_ROOT_PORT:
3476 return pci_acs_flags_enabled(pdev, acs_flags);
3477 /*
3478 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3479 * implemented by the remaining PCIe types to indicate peer-to-peer
3480 * capabilities, but only when they are part of a multifunction
3481 * device. The footnote for section 6.12 indicates the specific
3482 * PCIe types included here.
3483 */
3484 case PCI_EXP_TYPE_ENDPOINT:
3485 case PCI_EXP_TYPE_UPSTREAM:
3486 case PCI_EXP_TYPE_LEG_END:
3487 case PCI_EXP_TYPE_RC_END:
3488 if (!pdev->multifunction)
3489 break;
3490
3491 return pci_acs_flags_enabled(pdev, acs_flags);
3492 }
3493
3494 /*
3495 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3496 * to single function devices with the exception of downstream ports.
3497 */
3498 return true;
3499}
3500
3501/**
3502 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
3503 * @start: starting downstream device
3504 * @end: ending upstream device or NULL to search to the root bus
3505 * @acs_flags: required flags
3506 *
3507 * Walk up a device tree from start to end testing PCI ACS support. If
3508 * any step along the way does not support the required flags, return false.
3509 */
3510bool pci_acs_path_enabled(struct pci_dev *start,
3511 struct pci_dev *end, u16 acs_flags)
3512{
3513 struct pci_dev *pdev, *parent = start;
3514
3515 do {
3516 pdev = parent;
3517
3518 if (!pci_acs_enabled(pdev, acs_flags))
3519 return false;
3520
3521 if (pci_is_root_bus(pdev->bus))
3522 return (end == NULL);
3523
3524 parent = pdev->bus->self;
3525 } while (pdev != end);
3526
3527 return true;
3528}
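
/*
 * Usage sketch (illustrative; not a call site in this file): an IOMMU
 * grouping or VFIO-style caller could check that peer-to-peer DMA is
 * isolated along the whole path from a device to the root bus. The
 * flag set below is an assumption for the example.
 *
 *	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *
 *	if (!pci_acs_path_enabled(pdev, NULL, flags))
 *		pci_warn(pdev, "ACS not enabled on upstream path\n");
 */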
3529
3530/**
Olivier Deprez157378f2022-04-04 15:47:50 +02003531 * pci_acs_init - Initialize ACS if hardware supports it
3532 * @dev: the PCI device
3533 */
3534void pci_acs_init(struct pci_dev *dev)
3535{
3536 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3537
3538 /*
3539 * Attempt to enable ACS regardless of capability because some Root
3540 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3541 * the standard ACS capability but still support ACS via those
3542 * quirks.
3543 */
3544 pci_enable_acs(dev);
3545}
3546
3547/**
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003548 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3549 * @pdev: PCI device
3550 * @bar: BAR to find
3551 *
3552 * Helper to find the position of the ctrl register for a BAR.
3553 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3554 * Returns -ENOENT if no ctrl register for the BAR could be found.
3555 */
3556static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3557{
3558 unsigned int pos, nbars, i;
3559 u32 ctrl;
3560
3561 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3562 if (!pos)
3563 return -ENOTSUPP;
3564
3565 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3566 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3567 PCI_REBAR_CTRL_NBAR_SHIFT;
3568
3569 for (i = 0; i < nbars; i++, pos += 8) {
3570 int bar_idx;
3571
3572 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3573 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3574 if (bar_idx == bar)
3575 return pos;
3576 }
3577
3578 return -ENOENT;
3579}
3580
3581/**
3582 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3583 * @pdev: PCI device
3584 * @bar: BAR to query
3585 *
3586 * Get the possible sizes of a resizable BAR as a bitmask defined in the spec
3587 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3588 */
3589u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3590{
3591 int pos;
3592 u32 cap;
3593
3594 pos = pci_rebar_find_pos(pdev, bar);
3595 if (pos < 0)
3596 return 0;
3597
3598 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
Olivier Deprez0e641232021-09-23 10:07:05 +02003599 cap &= PCI_REBAR_CAP_SIZES;
3600
3601 /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3602 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3603 bar == 0 && cap == 0x7000)
3604 cap = 0x3f000;
3605
3606 return cap >> 4;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003607}
3608
3609/**
3610 * pci_rebar_get_current_size - get the current size of a BAR
3611 * @pdev: PCI device
3612 * @bar: BAR to query
3613 *
3614 * Read the size of a BAR from the resizable BAR config.
3615 * Returns size if found or negative error code.
3616 */
3617int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3618{
3619 int pos;
3620 u32 ctrl;
3621
3622 pos = pci_rebar_find_pos(pdev, bar);
3623 if (pos < 0)
3624 return pos;
3625
3626 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3627 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3628}
3629
3630/**
3631 * pci_rebar_set_size - set a new size for a BAR
3632 * @pdev: PCI device
3633 * @bar: BAR to set size to
3634 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3635 *
3636 * Set the new size of a BAR as defined in the spec.
3637 * Returns zero if resizing was successful, error code otherwise.
3638 */
3639int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3640{
3641 int pos;
3642 u32 ctrl;
3643
3644 pos = pci_rebar_find_pos(pdev, bar);
3645 if (pos < 0)
3646 return pos;
3647
3648 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3649 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3650 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3651 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3652 return 0;
3653}
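
/*
 * Sketch of the resize flow these helpers support (illustrative; a real
 * caller must release the BAR's resource first and reassign it after):
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
 *
 *	if (sizes) {
 *		int size = __fls(sizes);	(largest supported, 0=1MB)
 *
 *		pci_rebar_set_size(pdev, bar, size);
 *	}
 */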
3654
3655/**
3656 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3657 * @dev: the PCI device
3658 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3659 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3660 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3661 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3662 *
3663 * Return 0 if all upstream bridges support AtomicOp routing, egress
3664 * blocking is disabled on all upstream ports, and the root port supports
3665 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3666 * AtomicOp completion), or negative otherwise.
3667 */
3668int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3669{
3670 struct pci_bus *bus = dev->bus;
3671 struct pci_dev *bridge;
3672 u32 cap, ctl2;
3673
3674 if (!pci_is_pcie(dev))
3675 return -EINVAL;
3676
3677 /*
3678 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3679 * AtomicOp requesters. For now, we only support endpoints as
3680 * requesters and root ports as completers. No endpoints as
3681 * completers, and no peer-to-peer.
3682 */
3683
3684 switch (pci_pcie_type(dev)) {
3685 case PCI_EXP_TYPE_ENDPOINT:
3686 case PCI_EXP_TYPE_LEG_END:
3687 case PCI_EXP_TYPE_RC_END:
3688 break;
3689 default:
3690 return -EINVAL;
3691 }
3692
3693 while (bus->parent) {
3694 bridge = bus->self;
3695
3696 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3697
3698 switch (pci_pcie_type(bridge)) {
3699 /* Ensure switch ports support AtomicOp routing */
3700 case PCI_EXP_TYPE_UPSTREAM:
3701 case PCI_EXP_TYPE_DOWNSTREAM:
3702 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3703 return -EINVAL;
3704 break;
3705
3706 /* Ensure root port supports all the sizes we care about */
3707 case PCI_EXP_TYPE_ROOT_PORT:
3708 if ((cap & cap_mask) != cap_mask)
3709 return -EINVAL;
3710 break;
3711 }
3712
3713 /* Ensure upstream ports don't block AtomicOps on egress */
David Brazdil0f672f62019-12-10 10:32:29 +00003714 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003715 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3716 &ctl2);
3717 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3718 return -EINVAL;
3719 }
3720
3721 bus = bus->parent;
3722 }
3723
3724 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3725 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3726 return 0;
3727}
3728EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
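
/*
 * Usage sketch (illustrative; the completer size and error handling are
 * assumptions): a driver whose endpoint issues 64-bit AtomicOp requests
 * could enable routing from its probe() path.
 *
 *	rc = pci_enable_atomic_ops_to_root(pdev,
 *					   PCI_EXP_DEVCAP2_ATOMIC_COMP64);
 *	if (rc)
 *		pci_info(pdev, "AtomicOps to Root Port not available\n");
 */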
3729
3730/**
3731 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3732 * @dev: the PCI device
3733 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3734 *
3735 * Perform INTx swizzling for a device behind one level of bridge. This is
3736 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3737 * behind bridges on add-in cards. For devices with ARI enabled, the slot
3738 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3739 * the PCI Express Base Specification, Revision 2.1)
3740 */
3741u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3742{
3743 int slot;
3744
3745 if (pci_ari_enabled(dev->bus))
3746 slot = 0;
3747 else
3748 slot = PCI_SLOT(dev->devfn);
3749
3750 return (((pin - 1) + slot) % 4) + 1;
3751}
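
/*
 * Worked example: a device in slot 3 asserting INTB (pin 2) behind one
 * bridge maps to (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA on the bridge's
 * primary side.
 */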
3752
3753int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3754{
3755 u8 pin;
3756
3757 pin = dev->pin;
3758 if (!pin)
3759 return -1;
3760
3761 while (!pci_is_root_bus(dev->bus)) {
3762 pin = pci_swizzle_interrupt_pin(dev, pin);
3763 dev = dev->bus->self;
3764 }
3765 *bridge = dev;
3766 return pin;
3767}
3768
3769/**
3770 * pci_common_swizzle - swizzle INTx all the way to root bridge
3771 * @dev: the PCI device
3772 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3773 *
3774 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
3775 * bridges all the way up to a PCI root bus.
3776 */
3777u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3778{
3779 u8 pin = *pinp;
3780
3781 while (!pci_is_root_bus(dev->bus)) {
3782 pin = pci_swizzle_interrupt_pin(dev, pin);
3783 dev = dev->bus->self;
3784 }
3785 *pinp = pin;
3786 return PCI_SLOT(dev->devfn);
3787}
3788EXPORT_SYMBOL_GPL(pci_common_swizzle);
3789
3790/**
David Brazdil0f672f62019-12-10 10:32:29 +00003791 * pci_release_region - Release a PCI BAR
3792 * @pdev: PCI device whose resources were previously reserved by
3793 * pci_request_region()
3794 * @bar: BAR to release
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003795 *
David Brazdil0f672f62019-12-10 10:32:29 +00003796 * Releases the PCI I/O and memory resources previously reserved by a
3797 * successful call to pci_request_region(). Call this function only
3798 * after all use of the PCI regions has ceased.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003799 */
3800void pci_release_region(struct pci_dev *pdev, int bar)
3801{
3802 struct pci_devres *dr;
3803
3804 if (pci_resource_len(pdev, bar) == 0)
3805 return;
3806 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3807 release_region(pci_resource_start(pdev, bar),
3808 pci_resource_len(pdev, bar));
3809 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3810 release_mem_region(pci_resource_start(pdev, bar),
3811 pci_resource_len(pdev, bar));
3812
3813 dr = find_pci_dr(pdev);
3814 if (dr)
3815 dr->region_mask &= ~(1 << bar);
3816}
3817EXPORT_SYMBOL(pci_release_region);
3818
3819/**
David Brazdil0f672f62019-12-10 10:32:29 +00003820 * __pci_request_region - Reserve PCI I/O and memory resource
3821 * @pdev: PCI device whose resources are to be reserved
3822 * @bar: BAR to be reserved
3823 * @res_name: Name to be associated with resource.
3824 * @exclusive: whether the region access is exclusive or not
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003825 *
David Brazdil0f672f62019-12-10 10:32:29 +00003826 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3827 * being reserved by owner @res_name. Do not access any
3828 * address inside the PCI regions unless this call returns
3829 * successfully.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003830 *
David Brazdil0f672f62019-12-10 10:32:29 +00003831 * If @exclusive is set, then the region is marked so that userspace
3832 * is explicitly not allowed to map the resource via /dev/mem or
3833 * sysfs MMIO access.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003834 *
David Brazdil0f672f62019-12-10 10:32:29 +00003835 * Returns 0 on success, or %EBUSY on error. A warning
3836 * message is also printed on failure.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003837 */
3838static int __pci_request_region(struct pci_dev *pdev, int bar,
3839 const char *res_name, int exclusive)
3840{
3841 struct pci_devres *dr;
3842
3843 if (pci_resource_len(pdev, bar) == 0)
3844 return 0;
3845
3846 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3847 if (!request_region(pci_resource_start(pdev, bar),
3848 pci_resource_len(pdev, bar), res_name))
3849 goto err_out;
3850 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3851 if (!__request_mem_region(pci_resource_start(pdev, bar),
3852 pci_resource_len(pdev, bar), res_name,
3853 exclusive))
3854 goto err_out;
3855 }
3856
3857 dr = find_pci_dr(pdev);
3858 if (dr)
3859 dr->region_mask |= 1 << bar;
3860
3861 return 0;
3862
3863err_out:
3864 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3865 &pdev->resource[bar]);
3866 return -EBUSY;
3867}
3868
3869/**
David Brazdil0f672f62019-12-10 10:32:29 +00003870 * pci_request_region - Reserve PCI I/O and memory resource
3871 * @pdev: PCI device whose resources are to be reserved
3872 * @bar: BAR to be reserved
3873 * @res_name: Name to be associated with resource
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003874 *
David Brazdil0f672f62019-12-10 10:32:29 +00003875 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3876 * being reserved by owner @res_name. Do not access any
3877 * address inside the PCI regions unless this call returns
3878 * successfully.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003879 *
David Brazdil0f672f62019-12-10 10:32:29 +00003880 * Returns 0 on success, or %EBUSY on error. A warning
3881 * message is also printed on failure.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003882 */
3883int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3884{
3885 return __pci_request_region(pdev, bar, res_name, 0);
3886}
3887EXPORT_SYMBOL(pci_request_region);
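
/*
 * Usage sketch (illustrative driver code; "foo" is a placeholder name):
 * claim BAR 0 before mapping it, and release it again on teardown.
 *
 *	if (pci_request_region(pdev, 0, "foo"))
 *		return -EBUSY;
 *	regs = pci_iomap(pdev, 0, 0);
 *	...
 *	pci_iounmap(pdev, regs);
 *	pci_release_region(pdev, 0);
 */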
3888
3889/**
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003890 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3891 * @pdev: PCI device whose resources were previously reserved
3892 * @bars: Bitmask of BARs to be released
3893 *
3894 * Release selected PCI I/O and memory resources previously reserved.
3895 * Call this function only after all use of the PCI regions has ceased.
3896 */
3897void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3898{
3899 int i;
3900
Olivier Deprez157378f2022-04-04 15:47:50 +02003901 for (i = 0; i < PCI_STD_NUM_BARS; i++)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003902 if (bars & (1 << i))
3903 pci_release_region(pdev, i);
3904}
3905EXPORT_SYMBOL(pci_release_selected_regions);
3906
3907static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3908 const char *res_name, int excl)
3909{
3910 int i;
3911
Olivier Deprez157378f2022-04-04 15:47:50 +02003912 for (i = 0; i < PCI_STD_NUM_BARS; i++)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003913 if (bars & (1 << i))
3914 if (__pci_request_region(pdev, i, res_name, excl))
3915 goto err_out;
3916 return 0;
3917
3918err_out:
3919 while (--i >= 0)
3920 if (bars & (1 << i))
3921 pci_release_region(pdev, i);
3922
3923 return -EBUSY;
3924}
3925
3927/**
3928 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3929 * @pdev: PCI device whose resources are to be reserved
3930 * @bars: Bitmask of BARs to be requested
3931 * @res_name: Name to be associated with resource
3932 */
3933int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3934 const char *res_name)
3935{
3936 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3937}
3938EXPORT_SYMBOL(pci_request_selected_regions);
3939
3940int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3941 const char *res_name)
3942{
3943 return __pci_request_selected_regions(pdev, bars, res_name,
3944 IORESOURCE_EXCLUSIVE);
3945}
3946EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
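
/*
 * Usage sketch (illustrative): @bars is a bitmask of BAR indices, so a
 * driver that only touches BARs 0 and 2 could claim just those:
 *
 *	int bars = (1 << 0) | (1 << 2);
 *
 *	if (pci_request_selected_regions(pdev, bars, "foo"))
 *		return -EBUSY;
 *	...
 *	pci_release_selected_regions(pdev, bars);
 */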
3947
3948/**
David Brazdil0f672f62019-12-10 10:32:29 +00003949 * pci_release_regions - Release reserved PCI I/O and memory resources
3950 * @pdev: PCI device whose resources were previously reserved by
3951 * pci_request_regions()
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003952 *
David Brazdil0f672f62019-12-10 10:32:29 +00003953 * Releases all PCI I/O and memory resources previously reserved by a
3954 * successful call to pci_request_regions(). Call this function only
3955 * after all use of the PCI regions has ceased.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003956 */
3958void pci_release_regions(struct pci_dev *pdev)
3959{
Olivier Deprez157378f2022-04-04 15:47:50 +02003960 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003961}
3962EXPORT_SYMBOL(pci_release_regions);
3963
3964/**
David Brazdil0f672f62019-12-10 10:32:29 +00003965 * pci_request_regions - Reserve PCI I/O and memory resources
3966 * @pdev: PCI device whose resources are to be reserved
3967 * @res_name: Name to be associated with resource.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003968 *
David Brazdil0f672f62019-12-10 10:32:29 +00003969 * Mark all PCI regions associated with PCI device @pdev as
3970 * being reserved by owner @res_name. Do not access any
3971 * address inside the PCI regions unless this call returns
3972 * successfully.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003973 *
David Brazdil0f672f62019-12-10 10:32:29 +00003974 * Returns 0 on success, or %EBUSY on error. A warning
3975 * message is also printed on failure.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003976 */
3977int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3978{
Olivier Deprez157378f2022-04-04 15:47:50 +02003979 return pci_request_selected_regions(pdev,
3980 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003981}
3982EXPORT_SYMBOL(pci_request_regions);
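
/*
 * Usage sketch (an assumed probe() fragment, not code from this file):
 *
 *	rc = pci_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	rc = pci_request_regions(pdev, "foo");
 *	if (rc)
 *		goto err_disable_device;
 */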
3983
3984/**
David Brazdil0f672f62019-12-10 10:32:29 +00003985 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3986 * @pdev: PCI device whose resources are to be reserved
3987 * @res_name: Name to be associated with resource.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003988 *
David Brazdil0f672f62019-12-10 10:32:29 +00003989 * Mark all PCI regions associated with PCI device @pdev as being reserved
3990 * by owner @res_name. Do not access any address inside the PCI regions
3991 * unless this call returns successfully.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003992 *
David Brazdil0f672f62019-12-10 10:32:29 +00003993 * pci_request_regions_exclusive() will mark the region so that /dev/mem
3994 * and the sysfs MMIO access will not be allowed.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003995 *
David Brazdil0f672f62019-12-10 10:32:29 +00003996 * Returns 0 on success, or %EBUSY on error. A warning message is also
3997 * printed on failure.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003998 */
3999int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4000{
4001 return pci_request_selected_regions_exclusive(pdev,
Olivier Deprez157378f2022-04-04 15:47:50 +02004002 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004003}
4004EXPORT_SYMBOL(pci_request_regions_exclusive);
4005
4006/*
4007 * Record the PCI IO range (expressed as CPU physical address + size).
David Brazdil0f672f62019-12-10 10:32:29 +00004008 * Return a negative value if an error has occurred, zero otherwise
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004009 */
4010int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4011 resource_size_t size)
4012{
4013 int ret = 0;
4014#ifdef PCI_IOBASE
4015 struct logic_pio_hwaddr *range;
4016
4017 if (!size || addr + size < addr)
4018 return -EINVAL;
4019
4020 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4021 if (!range)
4022 return -ENOMEM;
4023
4024 range->fwnode = fwnode;
4025 range->size = size;
4026 range->hw_start = addr;
4027 range->flags = LOGIC_PIO_CPU_MMIO;
4028
4029 ret = logic_pio_register_range(range);
4030 if (ret)
4031 kfree(range);
Olivier Deprez0e641232021-09-23 10:07:05 +02004032
4033 /* Ignore duplicates due to deferred probing */
4034 if (ret == -EEXIST)
4035 ret = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004036#endif
4037
4038 return ret;
4039}
4040
4041phys_addr_t pci_pio_to_address(unsigned long pio)
4042{
4043 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4044
4045#ifdef PCI_IOBASE
4046 if (pio >= MMIO_UPPER_LIMIT)
4047 return address;
4048
4049 address = logic_pio_to_hwaddr(pio);
4050#endif
4051
4052 return address;
4053}
Olivier Deprez157378f2022-04-04 15:47:50 +02004054EXPORT_SYMBOL_GPL(pci_pio_to_address);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004055
4056unsigned long __weak pci_address_to_pio(phys_addr_t address)
4057{
4058#ifdef PCI_IOBASE
4059 return logic_pio_trans_cpuaddr(address);
4060#else
4061 if (address > IO_SPACE_LIMIT)
4062 return (unsigned long)-1;
4063
4064 return (unsigned long) address;
4065#endif
4066}
4067
4068/**
David Brazdil0f672f62019-12-10 10:32:29 +00004069 * pci_remap_iospace - Remap the memory mapped I/O space
4070 * @res: Resource describing the I/O space
4071 * @phys_addr: physical address of range to be mapped
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004072 *
David Brazdil0f672f62019-12-10 10:32:29 +00004073 * Remap the memory mapped I/O space described by the @res and the CPU
4074 * physical address @phys_addr into virtual address space. Only
4075 * architectures that have memory mapped IO functions defined (and the
4076 * PCI_IOBASE value defined) should call this function.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004077 */
4078int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4079{
4080#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4081 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4082
4083 if (!(res->flags & IORESOURCE_IO))
4084 return -EINVAL;
4085
4086 if (res->end > IO_SPACE_LIMIT)
4087 return -EINVAL;
4088
4089 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4090 pgprot_device(PAGE_KERNEL));
4091#else
David Brazdil0f672f62019-12-10 10:32:29 +00004092 /*
4093 * This architecture does not have memory mapped I/O space,
4094 * so this function should never be called
4095 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004096 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4097 return -ENODEV;
4098#endif
4099}
4100EXPORT_SYMBOL(pci_remap_iospace);
4101
4102/**
David Brazdil0f672f62019-12-10 10:32:29 +00004103 * pci_unmap_iospace - Unmap the memory mapped I/O space
4104 * @res: resource to be unmapped
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004105 *
David Brazdil0f672f62019-12-10 10:32:29 +00004106 * Unmap the CPU virtual address @res from virtual address space. Only
4107 * architectures that have memory mapped IO functions defined (and the
4108 * PCI_IOBASE value defined) should call this function.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004109 */
4110void pci_unmap_iospace(struct resource *res)
4111{
4112#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4113 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4114
4115 unmap_kernel_range(vaddr, resource_size(res));
4116#endif
4117}
4118EXPORT_SYMBOL(pci_unmap_iospace);
4119
4120static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4121{
4122 struct resource **res = ptr;
4123
4124 pci_unmap_iospace(*res);
4125}
4126
4127/**
4128 * devm_pci_remap_iospace - Managed pci_remap_iospace()
4129 * @dev: Generic device to remap IO address for
4130 * @res: Resource describing the I/O space
4131 * @phys_addr: physical address of range to be mapped
4132 *
4133 * Managed pci_remap_iospace(). Map is automatically unmapped on driver
4134 * detach.
4135 */
4136int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4137 phys_addr_t phys_addr)
4138{
4139 const struct resource **ptr;
4140 int error;
4141
4142 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4143 if (!ptr)
4144 return -ENOMEM;
4145
4146 error = pci_remap_iospace(res, phys_addr);
4147 if (error) {
4148 devres_free(ptr);
4149 } else {
4150 *ptr = res;
4151 devres_add(dev, ptr);
4152 }
4153
4154 return error;
4155}
4156EXPORT_SYMBOL(devm_pci_remap_iospace);
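
/*
 * Usage sketch (a pattern seen in host bridge drivers; the surrounding
 * resource handling is assumed): map a PCI I/O window whose CPU address
 * was registered with pci_register_io_range().
 *
 *	err = devm_pci_remap_iospace(dev, res,
 *				     pci_pio_to_address(res->start));
 *	if (err)
 *		dev_warn(dev, "error %d: failed to map resource %pR\n",
 *			 err, res);
 */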
4157
4158/**
4159 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4160 * @dev: Generic device to remap IO address for
4161 * @offset: Resource address to map
4162 * @size: Size of map
4163 *
4164 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
4165 * detach.
4166 */
4167void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4168 resource_size_t offset,
4169 resource_size_t size)
4170{
4171 void __iomem **ptr, *addr;
4172
4173 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4174 if (!ptr)
4175 return NULL;
4176
4177 addr = pci_remap_cfgspace(offset, size);
4178 if (addr) {
4179 *ptr = addr;
4180 devres_add(dev, ptr);
4181 } else
4182 devres_free(ptr);
4183
4184 return addr;
4185}
4186EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4187
4188/**
4189 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4190 * @dev: generic device to handle the resource for
4191 * @res: configuration space resource to be handled
4192 *
4193 * Checks that a resource is a valid memory region, requests the memory
4194 * region and ioremaps with pci_remap_cfgspace() API that ensures the
4195 * proper PCI configuration space memory attributes are guaranteed.
4196 *
4197 * All operations are managed and will be undone on driver detach.
4198 *
4199 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4200 * on failure. Usage example::
4201 *
4202 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4203 * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4204 * if (IS_ERR(base))
4205 * return PTR_ERR(base);
4206 */
4207void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4208 struct resource *res)
4209{
4210 resource_size_t size;
4211 const char *name;
4212 void __iomem *dest_ptr;
4213
4214 BUG_ON(!dev);
4215
4216 if (!res || resource_type(res) != IORESOURCE_MEM) {
4217 dev_err(dev, "invalid resource\n");
4218 return IOMEM_ERR_PTR(-EINVAL);
4219 }
4220
4221 size = resource_size(res);
4222 name = res->name ?: dev_name(dev);
4223
4224 if (!devm_request_mem_region(dev, res->start, size, name)) {
4225 dev_err(dev, "can't request region for resource %pR\n", res);
4226 return IOMEM_ERR_PTR(-EBUSY);
4227 }
4228
4229 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4230 if (!dest_ptr) {
4231 dev_err(dev, "ioremap failed for resource %pR\n", res);
4232 devm_release_mem_region(dev, res->start, size);
4233 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4234 }
4235
4236 return dest_ptr;
4237}
4238EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4239
4240static void __pci_set_master(struct pci_dev *dev, bool enable)
4241{
4242 u16 old_cmd, cmd;
4243
4244 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4245 if (enable)
4246 cmd = old_cmd | PCI_COMMAND_MASTER;
4247 else
4248 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4249 if (cmd != old_cmd) {
4250 pci_dbg(dev, "%s bus mastering\n",
4251 enable ? "enabling" : "disabling");
4252 pci_write_config_word(dev, PCI_COMMAND, cmd);
4253 }
4254 dev->is_busmaster = enable;
4255}
4256
4257/**
4258 * pcibios_setup - process "pci=" kernel boot arguments
4259 * @str: string used to pass in "pci=" kernel boot arguments
4260 *
4261 * Process kernel boot arguments. This is the default implementation.
4262 * Architecture specific implementations can override this as necessary.
4263 */
4264char * __weak __init pcibios_setup(char *str)
4265{
4266 return str;
4267}
4268
4269/**
4270 * pcibios_set_master - enable PCI bus-mastering for device dev
4271 * @dev: the PCI device to enable
4272 *
4273 * Enables PCI bus-mastering for the device. This is the default
4274 * implementation. Architecture specific implementations can override
4275 * this if necessary.
4276 */
4277void __weak pcibios_set_master(struct pci_dev *dev)
4278{
4279 u8 lat;
4280
4281 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4282 if (pci_is_pcie(dev))
4283 return;
4284
4285 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4286 if (lat < 16)
4287 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4288 else if (lat > pcibios_max_latency)
4289 lat = pcibios_max_latency;
4290 else
4291 return;
4292
4293 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4294}
4295
4296/**
4297 * pci_set_master - enables bus-mastering for device dev
4298 * @dev: the PCI device to enable
4299 *
4300 * Enables bus-mastering on the device and calls pcibios_set_master()
4301 * to do the needed arch specific settings.
4302 */
4303void pci_set_master(struct pci_dev *dev)
4304{
4305 __pci_set_master(dev, true);
4306 pcibios_set_master(dev);
4307}
4308EXPORT_SYMBOL(pci_set_master);
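
/*
 * Usage sketch (illustrative ordering in a driver's probe() path): the
 * device must be enabled before bus mastering is turned on.
 *
 *	rc = pci_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	pci_set_master(pdev);
 */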
4309
4310/**
4311 * pci_clear_master - disables bus-mastering for device dev
4312 * @dev: the PCI device to disable
4313 */
4314void pci_clear_master(struct pci_dev *dev)
4315{
4316 __pci_set_master(dev, false);
4317}
4318EXPORT_SYMBOL(pci_clear_master);
4319
4320/**
4321 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4322 * @dev: the PCI device for which MWI is to be enabled
4323 *
4324 * Helper function for pci_set_mwi.
4325 * Originally copied from drivers/net/acenic.c.
4326 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4327 *
4328 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4329 */
4330int pci_set_cacheline_size(struct pci_dev *dev)
4331{
4332 u8 cacheline_size;
4333
4334 if (!pci_cache_line_size)
4335 return -EINVAL;
4336
4337 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4338	   equal to or a multiple of the right value. */
4339 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4340 if (cacheline_size >= pci_cache_line_size &&
4341 (cacheline_size % pci_cache_line_size) == 0)
4342 return 0;
4343
4344 /* Write the correct value. */
4345 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4346 /* Read it back. */
4347 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4348 if (cacheline_size == pci_cache_line_size)
4349 return 0;
4350
David Brazdil0f672f62019-12-10 10:32:29 +00004351 pci_info(dev, "cache line size of %d is not supported\n",
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004352 pci_cache_line_size << 2);
4353
4354 return -EINVAL;
4355}
4356EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4357
4358/**
4359 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4360 * @dev: the PCI device for which MWI is enabled
4361 *
4362 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4363 *
4364 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4365 */
4366int pci_set_mwi(struct pci_dev *dev)
4367{
4368#ifdef PCI_DISABLE_MWI
4369 return 0;
4370#else
4371 int rc;
4372 u16 cmd;
4373
4374 rc = pci_set_cacheline_size(dev);
4375 if (rc)
4376 return rc;
4377
4378 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4379 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4380 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4381 cmd |= PCI_COMMAND_INVALIDATE;
4382 pci_write_config_word(dev, PCI_COMMAND, cmd);
4383 }
4384 return 0;
4385#endif
4386}
4387EXPORT_SYMBOL(pci_set_mwi);
4388
4389/**
4390 * pcim_set_mwi - a device-managed pci_set_mwi()
4391 * @dev: the PCI device for which MWI is enabled
4392 *
4393 * Managed pci_set_mwi().
4394 *
4395 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4396 */
4397int pcim_set_mwi(struct pci_dev *dev)
4398{
4399 struct pci_devres *dr;
4400
4401 dr = find_pci_dr(dev);
4402 if (!dr)
4403 return -ENOMEM;
4404
4405 dr->mwi = 1;
4406 return pci_set_mwi(dev);
4407}
4408EXPORT_SYMBOL(pcim_set_mwi);
4409
4410/**
4411 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4412 * @dev: the PCI device for which MWI is enabled
4413 *
4414 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4415 * Callers are not required to check the return value.
4416 *
4417 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4418 */
4419int pci_try_set_mwi(struct pci_dev *dev)
4420{
4421#ifdef PCI_DISABLE_MWI
4422 return 0;
4423#else
4424 return pci_set_mwi(dev);
4425#endif
4426}
4427EXPORT_SYMBOL(pci_try_set_mwi);
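
/*
 * Usage sketch (illustrative): MWI is only an optimization, so a driver
 * that merely prefers it can ignore the result.
 *
 *	pci_try_set_mwi(pdev);
 */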
4428
4429/**
4430 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4431 * @dev: the PCI device to disable
4432 *
4433 * Disables PCI Memory-Write-Invalidate transaction on the device
4434 */
4435void pci_clear_mwi(struct pci_dev *dev)
4436{
4437#ifndef PCI_DISABLE_MWI
4438 u16 cmd;
4439
4440 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4441 if (cmd & PCI_COMMAND_INVALIDATE) {
4442 cmd &= ~PCI_COMMAND_INVALIDATE;
4443 pci_write_config_word(dev, PCI_COMMAND, cmd);
4444 }
4445#endif
4446}
4447EXPORT_SYMBOL(pci_clear_mwi);
4448
4449/**
4450 * pci_intx - enables/disables PCI INTx for device dev
4451 * @pdev: the PCI device to operate on
4452 * @enable: boolean: whether to enable or disable PCI INTx
4453 *
David Brazdil0f672f62019-12-10 10:32:29 +00004454 * Enables/disables PCI INTx for device @pdev
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004455 */
4456void pci_intx(struct pci_dev *pdev, int enable)
4457{
4458 u16 pci_command, new;
4459
4460 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4461
4462 if (enable)
4463 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4464 else
4465 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4466
4467 if (new != pci_command) {
4468 struct pci_devres *dr;
4469
4470 pci_write_config_word(pdev, PCI_COMMAND, new);
4471
4472 dr = find_pci_dr(pdev);
4473 if (dr && !dr->restore_intx) {
4474 dr->restore_intx = 1;
4475 dr->orig_intx = !enable;
4476 }
4477 }
4478}
4479EXPORT_SYMBOL_GPL(pci_intx);
4480
4481static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4482{
4483 struct pci_bus *bus = dev->bus;
4484 bool mask_updated = true;
4485 u32 cmd_status_dword;
4486 u16 origcmd, newcmd;
4487 unsigned long flags;
4488 bool irq_pending;
4489
4490 /*
4491 * We do a single dword read to retrieve both command and status.
4492 * Document assumptions that make this possible.
4493 */
4494 BUILD_BUG_ON(PCI_COMMAND % 4);
4495 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4496
4497 raw_spin_lock_irqsave(&pci_lock, flags);
4498
4499 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4500
4501 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4502
4503 /*
4504 * Check interrupt status register to see whether our device
4505 * triggered the interrupt (when masking) or the next IRQ is
4506 * already pending (when unmasking).
4507 */
4508 if (mask != irq_pending) {
4509 mask_updated = false;
4510 goto done;
4511 }
4512
4513 origcmd = cmd_status_dword;
4514 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4515 if (mask)
4516 newcmd |= PCI_COMMAND_INTX_DISABLE;
4517 if (newcmd != origcmd)
4518 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4519
4520done:
4521 raw_spin_unlock_irqrestore(&pci_lock, flags);
4522
4523 return mask_updated;
4524}
4525
4526/**
4527 * pci_check_and_mask_intx - mask INTx on pending interrupt
4528 * @dev: the PCI device to operate on
4529 *
David Brazdil0f672f62019-12-10 10:32:29 +00004530 * Check if the device dev has its INTx line asserted, mask it and return
4531 * true in that case. False is returned if no interrupt was pending.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004532 */
4533bool pci_check_and_mask_intx(struct pci_dev *dev)
4534{
4535 return pci_check_and_set_intx_mask(dev, true);
4536}
4537EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
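
/*
 * Usage sketch (an assumed handler for a shared INTx line; the "foo"
 * names are placeholders): mask the line only if this device asserted
 * it, and return IRQ_NONE otherwise so other devices sharing the line
 * still get to run.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo *foo = data;
 *
 *		if (!pci_check_and_mask_intx(foo->pdev))
 *			return IRQ_NONE;
 *
 *		(handle the device, then unmask it again via
 *		 pci_check_and_unmask_intx() or pci_intx() when safe)
 *		return IRQ_HANDLED;
 *	}
 */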
4538
4539/**
4540 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4541 * @dev: the PCI device to operate on
4542 *
David Brazdil0f672f62019-12-10 10:32:29 +00004543 * Check if the device dev has its INTx line asserted, unmask it if not and
4544 * return true. False is returned and the mask remains active if there was
4545 * still an interrupt pending.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004546 */
4547bool pci_check_and_unmask_intx(struct pci_dev *dev)
4548{
4549 return pci_check_and_set_intx_mask(dev, false);
4550}
4551EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4552
4553/**
David Brazdil0f672f62019-12-10 10:32:29 +00004554 * pci_wait_for_pending_transaction - wait for pending transaction
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004555 * @dev: the PCI device to operate on
4556 *
4557 * Return 0 if transaction is pending, 1 otherwise.
4558 */
4559int pci_wait_for_pending_transaction(struct pci_dev *dev)
4560{
4561 if (!pci_is_pcie(dev))
4562 return 1;
4563
4564 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4565 PCI_EXP_DEVSTA_TRPND);
4566}
4567EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4568
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004569/**
4570 * pcie_has_flr - check if a device supports function level resets
David Brazdil0f672f62019-12-10 10:32:29 +00004571 * @dev: device to check
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004572 *
4573 * Returns true if the device advertises support for PCIe function level
4574 * resets.
4575 */
4576bool pcie_has_flr(struct pci_dev *dev)
4577{
4578 u32 cap;
4579
4580 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4581 return false;
4582
4583 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4584 return cap & PCI_EXP_DEVCAP_FLR;
4585}
4586EXPORT_SYMBOL_GPL(pcie_has_flr);
4587
4588/**
4589 * pcie_flr - initiate a PCIe function level reset
David Brazdil0f672f62019-12-10 10:32:29 +00004590 * @dev: device to reset
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004591 *
4592 * Initiate a function level reset on @dev. The caller should ensure the
4593 * device supports FLR before calling this function, e.g. by using the
4594 * pcie_has_flr() helper.
4595 */
4596int pcie_flr(struct pci_dev *dev)
4597{
4598 if (!pci_wait_for_pending_transaction(dev))
4599 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4600
4601 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4602
David Brazdil0f672f62019-12-10 10:32:29 +00004603 if (dev->imm_ready)
4604 return 0;
4605
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004606 /*
4607 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4608 * 100ms, but may silently discard requests while the FLR is in
4609 * progress. Wait 100ms before trying to access the device.
4610 */
4611 msleep(100);
4612
4613 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4614}
4615EXPORT_SYMBOL_GPL(pcie_flr);
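
/*
 * Usage sketch (illustrative; the caller is assumed to have quiesced the
 * function and saved any state it needs to restore afterwards):
 *
 *	if (pcie_has_flr(pdev)) {
 *		rc = pcie_flr(pdev);
 *		if (rc)
 *			pci_err(pdev, "FLR failed: %d\n", rc);
 *	}
 */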
4616
4617static int pci_af_flr(struct pci_dev *dev, int probe)
4618{
4619 int pos;
4620 u8 cap;
4621
4622 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4623 if (!pos)
4624 return -ENOTTY;
4625
4626 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4627 return -ENOTTY;
4628
4629 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4630 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4631 return -ENOTTY;
4632
4633 if (probe)
4634 return 0;
4635
4636 /*
4637 * Wait for Transaction Pending bit to clear. A word-aligned test
David Brazdil0f672f62019-12-10 10:32:29 +00004638 * is used, so we use the control offset rather than status and shift
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004639 * the test bit to match.
4640 */
4641 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4642 PCI_AF_STATUS_TP << 8))
4643 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4644
4645 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4646
David Brazdil0f672f62019-12-10 10:32:29 +00004647 if (dev->imm_ready)
4648 return 0;
4649
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004650 /*
4651 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4652 * updated 27 July 2006; a device must complete an FLR within
4653 * 100ms, but may silently discard requests while the FLR is in
4654 * progress. Wait 100ms before trying to access the device.
4655 */
4656 msleep(100);
4657
4658 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4659}
4660
4661/**
4662 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4663 * @dev: Device to reset.
4664 * @probe: If set, only check if the device can be reset this way.
4665 *
4666 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4667 * unset, it will be reinitialized internally when going from PCI_D3hot to
4668 * PCI_D0. If that's the case and the device is not in a low-power state
4669 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4670 *
4671 * NOTE: This causes the caller to sleep for twice the device power transition
4672 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
Olivier Deprez157378f2022-04-04 15:47:50 +02004673 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004674 * Moreover, only devices in D0 can be reset by this function.
4675 */
4676static int pci_pm_reset(struct pci_dev *dev, int probe)
4677{
4678 u16 csr;
4679
4680 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4681 return -ENOTTY;
4682
4683 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4684 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4685 return -ENOTTY;
4686
4687 if (probe)
4688 return 0;
4689
4690 if (dev->current_state != PCI_D0)
4691 return -EINVAL;
4692
4693 csr &= ~PCI_PM_CTRL_STATE_MASK;
4694 csr |= PCI_D3hot;
4695 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4696 pci_dev_d3_sleep(dev);
4697
4698 csr &= ~PCI_PM_CTRL_STATE_MASK;
4699 csr |= PCI_D0;
4700 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4701 pci_dev_d3_sleep(dev);
4702
Olivier Deprez157378f2022-04-04 15:47:50 +02004703 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004704}
Olivier Deprez0e641232021-09-23 10:07:05 +02004705
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004706/**
Olivier Deprez0e641232021-09-23 10:07:05 +02004707 * pcie_wait_for_link_delay - Wait until link is active or inactive
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004708 * @pdev: Bridge device
4709 * @active: waiting for active or inactive?
Olivier Deprez0e641232021-09-23 10:07:05 +02004710 * @delay: Delay to wait after link has become active (in ms)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004711 *
4712 * Use this to wait till link becomes active or inactive.
4713 */
Olivier Deprez0e641232021-09-23 10:07:05 +02004714static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4715 int delay)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004716{
4717 int timeout = 1000;
4718 bool ret;
4719 u16 lnk_status;
4720
David Brazdil0f672f62019-12-10 10:32:29 +00004721 /*
4722 * Some controllers might not implement link active reporting. In this
Olivier Deprez0e641232021-09-23 10:07:05 +02004723 * case, we wait for 1000 ms + any delay requested by the caller.
David Brazdil0f672f62019-12-10 10:32:29 +00004724 */
4725 if (!pdev->link_active_reporting) {
Olivier Deprez0e641232021-09-23 10:07:05 +02004726 msleep(timeout + delay);
David Brazdil0f672f62019-12-10 10:32:29 +00004727 return true;
4728 }
4729
4730 /*
4731	 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
4732	 * 20ms, after which we should expect the link to be active if the
4733	 * reset was successful. If so, software must wait a minimum of 100ms
4734	 * before sending configuration requests to devices downstream of this port.
4735 *
4736 * If the link fails to activate, either the device was physically
4737 * removed or the link is permanently failed.
4738 */
4739 if (active)
4740 msleep(20);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004741 for (;;) {
4742 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4743 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4744 if (ret == active)
David Brazdil0f672f62019-12-10 10:32:29 +00004745 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004746 if (timeout <= 0)
4747 break;
4748 msleep(10);
4749 timeout -= 10;
4750 }
David Brazdil0f672f62019-12-10 10:32:29 +00004751 if (active && ret)
Olivier Deprez0e641232021-09-23 10:07:05 +02004752 msleep(delay);
Olivier Deprez157378f2022-04-04 15:47:50 +02004753
David Brazdil0f672f62019-12-10 10:32:29 +00004754 return ret == active;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004755}
4756
Olivier Deprez0e641232021-09-23 10:07:05 +02004757/**
4758 * pcie_wait_for_link - Wait until link is active or inactive
4759 * @pdev: Bridge device
4760 * @active: waiting for active or inactive?
4761 *
4762 * Use this to wait till link becomes active or inactive.
4763 */
4764bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4765{
4766 return pcie_wait_for_link_delay(pdev, active, 100);
4767}
4768
4769/*
4770 * Find the maximum D3cold delay required by all the devices on the bus. The
4771 * spec says 100 ms, but firmware can lower it and we allow drivers to
4772 * increase it as well.
4773 *
4774 * Called with @pci_bus_sem locked for reading.
4775 */
4776static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4777{
4778 const struct pci_dev *pdev;
4779 int min_delay = 100;
4780 int max_delay = 0;
4781
4782 list_for_each_entry(pdev, &bus->devices, bus_list) {
4783 if (pdev->d3cold_delay < min_delay)
4784 min_delay = pdev->d3cold_delay;
4785 if (pdev->d3cold_delay > max_delay)
4786 max_delay = pdev->d3cold_delay;
4787 }
4788
4789 return max(min_delay, max_delay);
4790}
4791
4792/**
4793 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4794 * @dev: PCI bridge
4795 *
4796 * Handle necessary delays before access to the devices on the secondary
4797 * side of the bridge are permitted after D3cold to D0 transition.
4798 *
4799 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4800 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4801 * 4.3.2.
4802 */
4803void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4804{
4805 struct pci_dev *child;
4806 int delay;
4807
4808 if (pci_dev_is_disconnected(dev))
4809 return;
4810
4811 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4812 return;
4813
4814 down_read(&pci_bus_sem);
4815
4816 /*
4817 * We only deal with devices that are present currently on the bus.
4818 * For any hot-added devices the access delay is handled in pciehp
4819 * board_added(). In case of ACPI hotplug the firmware is expected
4820 * to configure the devices before OS is notified.
4821 */
4822 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4823 up_read(&pci_bus_sem);
4824 return;
4825 }
4826
4827 /* Take d3cold_delay requirements into account */
4828 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4829 if (!delay) {
4830 up_read(&pci_bus_sem);
4831 return;
4832 }
4833
4834 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4835 bus_list);
4836 up_read(&pci_bus_sem);
4837
4838 /*
4839	 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4840 * accessing the device after reset (that is 1000 ms + 100 ms). In
4841 * practice this should not be needed because we don't do power
4842 * management for them (see pci_bridge_d3_possible()).
4843 */
4844 if (!pci_is_pcie(dev)) {
4845 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4846 msleep(1000 + delay);
4847 return;
4848 }
4849
4850 /*
4851	 * For PCIe downstream and root ports that do not support speeds
4852	 * greater than 5 GT/s, we need to wait a minimum of 100 ms. For
4853	 * higher speeds (gen3 and above) we first need to wait for the data
4854	 * link layer to become active.
4855 *
4856 * However, 100 ms is the minimum and the PCIe spec says the
4857 * software must allow at least 1s before it can determine that the
4858 * device that did not respond is a broken device. There is
4859	 * evidence that 100 ms is not always enough; for example, a certain
4860	 * Titan Ridge xHCI controller does not always respond to
4861 * configuration requests if we only wait for 100 ms (see
4862 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
4863 *
4864 * Therefore we wait for 100 ms and check for the device presence.
4865 * If it is still not present give it an additional 100 ms.
4866	 * If it is still not present, give it an additional 100 ms.
4867 if (!pcie_downstream_port(dev))
4868 return;
4869
4870 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4871 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4872 msleep(delay);
4873 } else {
4874 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4875 delay);
4876 if (!pcie_wait_for_link_delay(dev, true, delay)) {
4877 /* Did not train, no need to wait any further */
Olivier Deprez157378f2022-04-04 15:47:50 +02004878 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
Olivier Deprez0e641232021-09-23 10:07:05 +02004879 return;
4880 }
4881 }
4882
4883 if (!pci_device_is_present(child)) {
4884 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4885 msleep(delay);
4886 }
4887}
4888
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004889void pci_reset_secondary_bus(struct pci_dev *dev)
4890{
4891 u16 ctrl;
4892
4893 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4894 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4895 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4896
4897 /*
4898 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
4899 * this to 2ms to ensure that we meet the minimum requirement.
4900 */
4901 msleep(2);
4902
4903 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4904 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4905
4906 /*
4907 * Trhfa for conventional PCI is 2^25 clock cycles.
4908 * Assuming a minimum 33MHz clock this results in a 1s
4909 * delay before we can consider subordinate devices to
4910 * be re-initialized. PCIe has some ways to shorten this,
4911 * but we don't make use of them yet.
4912 */
4913 ssleep(1);
4914}
4915
4916void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4917{
4918 pci_reset_secondary_bus(dev);
4919}
4920
4921/**
4922 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4923 * @dev: Bridge device
4924 *
4925 * Use the bridge control register to assert reset on the secondary bus.
4926 * Devices on the secondary bus are left in power-on state.
4927 */
4928int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4929{
4930 pcibios_reset_secondary_bus(dev);
4931
4932 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4933}
4934EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4935
4936static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4937{
4938 struct pci_dev *pdev;
4939
4940 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4941 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4942 return -ENOTTY;
4943
4944 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4945 if (pdev != dev)
4946 return -ENOTTY;
4947
4948 if (probe)
4949 return 0;
4950
4951 return pci_bridge_secondary_bus_reset(dev->bus->self);
4952}
4953
4954static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4955{
4956 int rc = -ENOTTY;
4957
David Brazdil0f672f62019-12-10 10:32:29 +00004958 if (!hotplug || !try_module_get(hotplug->owner))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004959 return rc;
4960
4961 if (hotplug->ops->reset_slot)
4962 rc = hotplug->ops->reset_slot(hotplug, probe);
4963
David Brazdil0f672f62019-12-10 10:32:29 +00004964 module_put(hotplug->owner);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004965
4966 return rc;
4967}
4968
4969static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4970{
Olivier Deprez157378f2022-04-04 15:47:50 +02004971 if (dev->multifunction || dev->subordinate || !dev->slot ||
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004972 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4973 return -ENOTTY;
4974
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004975 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4976}
4977
4978static void pci_dev_lock(struct pci_dev *dev)
4979{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004980 /* block PM suspend, driver probe, etc. */
4981 device_lock(&dev->dev);
Olivier Deprez92d4c212022-12-06 15:05:30 +01004982 pci_cfg_access_lock(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004983}
4984
4985/* Return 1 on successful lock, 0 on contention */
4986static int pci_dev_trylock(struct pci_dev *dev)
4987{
Olivier Deprez92d4c212022-12-06 15:05:30 +01004988 if (device_trylock(&dev->dev)) {
4989 if (pci_cfg_access_trylock(dev))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004990 return 1;
Olivier Deprez92d4c212022-12-06 15:05:30 +01004991 device_unlock(&dev->dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004992 }
4993
4994 return 0;
4995}
4996
4997static void pci_dev_unlock(struct pci_dev *dev)
4998{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004999 pci_cfg_access_unlock(dev);
Olivier Deprez92d4c212022-12-06 15:05:30 +01005000 device_unlock(&dev->dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005001}
5002
5003static void pci_dev_save_and_disable(struct pci_dev *dev)
5004{
5005 const struct pci_error_handlers *err_handler =
5006 dev->driver ? dev->driver->err_handler : NULL;
5007
5008 /*
5009 * dev->driver->err_handler->reset_prepare() is protected against
5010 * races with ->remove() by the device lock, which must be held by
5011 * the caller.
5012 */
5013 if (err_handler && err_handler->reset_prepare)
5014 err_handler->reset_prepare(dev);
5015
5016 /*
5017 * Wake-up device prior to save. PM registers default to D0 after
5018 * reset and a simple register restore doesn't reliably return
5019 * to a non-D0 state anyway.
5020 */
5021 pci_set_power_state(dev, PCI_D0);
5022
5023 pci_save_state(dev);
5024 /*
5025 * Disable the device by clearing the Command register, except for
5026 * INTx-disable which is set. This not only disables MMIO and I/O port
5027 * BARs, but also prevents the device from being Bus Master, preventing
5028 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
5029 * compliant devices, INTx-disable prevents legacy interrupts.
5030 */
5031 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5032}
5033
5034static void pci_dev_restore(struct pci_dev *dev)
5035{
5036 const struct pci_error_handlers *err_handler =
5037 dev->driver ? dev->driver->err_handler : NULL;
5038
5039 pci_restore_state(dev);
5040
5041 /*
5042 * dev->driver->err_handler->reset_done() is protected against
5043 * races with ->remove() by the device lock, which must be held by
5044 * the caller.
5045 */
5046 if (err_handler && err_handler->reset_done)
5047 err_handler->reset_done(dev);
5048}
5049
5050/**
5051 * __pci_reset_function_locked - reset a PCI device function while holding
5052 * the @dev mutex lock.
5053 * @dev: PCI device to reset
5054 *
5055 * Some devices allow an individual function to be reset without affecting
5056 * other functions in the same device. The PCI device must be responsive
5057 * to PCI config space in order to use this function.
5058 *
5059 * The device function is presumed to be unused and the caller is holding
5060 * the device mutex lock when this function is called.
David Brazdil0f672f62019-12-10 10:32:29 +00005061 *
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005062 * Resetting the device will make the contents of PCI configuration space
5063 * random, so any caller of this must be prepared to reinitialise the
5064 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5065 * etc.
5066 *
5067 * Returns 0 if the device function was successfully reset or negative if the
5068 * device doesn't support resetting a single function.
5069 */
5070int __pci_reset_function_locked(struct pci_dev *dev)
5071{
5072 int rc;
5073
5074 might_sleep();
5075
5076 /*
5077 * A reset method returns -ENOTTY if it doesn't support this device
5078 * and we should try the next method.
5079 *
5080 * If it returns 0 (success), we're finished. If it returns any
5081 * other error, we're also finished: this indicates that further
5082 * reset mechanisms might be broken on the device.
5083 */
5084 rc = pci_dev_specific_reset(dev, 0);
5085 if (rc != -ENOTTY)
5086 return rc;
5087 if (pcie_has_flr(dev)) {
5088 rc = pcie_flr(dev);
5089 if (rc != -ENOTTY)
5090 return rc;
5091 }
5092 rc = pci_af_flr(dev, 0);
5093 if (rc != -ENOTTY)
5094 return rc;
5095 rc = pci_pm_reset(dev, 0);
5096 if (rc != -ENOTTY)
5097 return rc;
5098 rc = pci_dev_reset_slot_function(dev, 0);
5099 if (rc != -ENOTTY)
5100 return rc;
5101 return pci_parent_bus_reset(dev, 0);
5102}
5103EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
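
/*
 * Illustrative sketch (not part of this file): a hypothetical caller that
 * already holds the device lock could drive a locked reset and keep the
 * device usable by bracketing it with a state save/restore.  The "mydrv_"
 * name is an assumption for the example.
 */
static int __maybe_unused mydrv_reset_locked(struct pci_dev *pdev)
{
	int rc;

	pci_save_state(pdev);			/* capture pre-reset config */
	rc = __pci_reset_function_locked(pdev);	/* device lock already held */
	pci_restore_state(pdev);		/* BARs, MSI, etc. come back */

	return rc;
}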
5104
5105/**
5106 * pci_probe_reset_function - check whether the device can be safely reset
5107 * @dev: PCI device to reset
5108 *
5109 * Some devices allow an individual function to be reset without affecting
5110 * other functions in the same device. The PCI device must be responsive
5111 * to PCI config space in order to use this function.
5112 *
5113 * Returns 0 if the device function can be reset or negative if the
5114 * device doesn't support resetting a single function.
5115 */
5116int pci_probe_reset_function(struct pci_dev *dev)
5117{
5118 int rc;
5119
5120 might_sleep();
5121
5122 rc = pci_dev_specific_reset(dev, 1);
5123 if (rc != -ENOTTY)
5124 return rc;
5125 if (pcie_has_flr(dev))
5126 return 0;
5127 rc = pci_af_flr(dev, 1);
5128 if (rc != -ENOTTY)
5129 return rc;
5130 rc = pci_pm_reset(dev, 1);
5131 if (rc != -ENOTTY)
5132 return rc;
5133 rc = pci_dev_reset_slot_function(dev, 1);
5134 if (rc != -ENOTTY)
5135 return rc;
5136
5137 return pci_parent_bus_reset(dev, 1);
5138}
5139
5140/**
5141 * pci_reset_function - quiesce and reset a PCI device function
5142 * @dev: PCI device to reset
5143 *
5144 * Some devices allow an individual function to be reset without affecting
5145 * other functions in the same device. The PCI device must be responsive
5146 * to PCI config space in order to use this function.
5147 *
5148 * This function does not just reset the PCI portion of a device, but
5149 * clears all the state associated with the device. This function differs
5150 * from __pci_reset_function_locked() in that it saves and restores device state
5151 * over the reset and takes the PCI device lock.
5152 *
5153 * Returns 0 if the device function was successfully reset or negative if the
5154 * device doesn't support resetting a single function.
5155 */
5156int pci_reset_function(struct pci_dev *dev)
5157{
5158 int rc;
5159
5160 if (!dev->reset_fn)
5161 return -ENOTTY;
5162
5163 pci_dev_lock(dev);
5164 pci_dev_save_and_disable(dev);
5165
5166 rc = __pci_reset_function_locked(dev);
5167
5168 pci_dev_restore(dev);
5169 pci_dev_unlock(dev);
5170
5171 return rc;
5172}
5173EXPORT_SYMBOL_GPL(pci_reset_function);
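
/*
 * Illustrative sketch: a driver recovering a wedged function might call
 * pci_reset_function() from process context (it sleeps and takes the
 * device lock internally).  The "mydrv_" helper is an assumption.
 */
static int __maybe_unused mydrv_recover(struct pci_dev *pdev)
{
	int rc = pci_reset_function(pdev);

	if (rc == -ENOTTY)
		pci_warn(pdev, "no function reset method available\n");
	else if (rc)
		pci_err(pdev, "function reset failed: %d\n", rc);

	return rc;
}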
5174
5175/**
5176 * pci_reset_function_locked - quiesce and reset a PCI device function
5177 * @dev: PCI device to reset
5178 *
5179 * Some devices allow an individual function to be reset without affecting
5180 * other functions in the same device. The PCI device must be responsive
5181 * to PCI config space in order to use this function.
5182 *
5183 * This function does not just reset the PCI portion of a device, but
5184 * clears all the state associated with the device. This function differs
5185 * from __pci_reset_function_locked() in that it saves and restores device state
5186 * over the reset. It also differs from pci_reset_function() in that it
5187 * requires the PCI device lock to be held.
5188 *
5189 * Returns 0 if the device function was successfully reset or negative if the
5190 * device doesn't support resetting a single function.
5191 */
5192int pci_reset_function_locked(struct pci_dev *dev)
5193{
5194 int rc;
5195
5196 if (!dev->reset_fn)
5197 return -ENOTTY;
5198
5199 pci_dev_save_and_disable(dev);
5200
5201 rc = __pci_reset_function_locked(dev);
5202
5203 pci_dev_restore(dev);
5204
5205 return rc;
5206}
5207EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5208
5209/**
5210 * pci_try_reset_function - quiesce and reset a PCI device function
5211 * @dev: PCI device to reset
5212 *
5213 * Same as above, except return -EAGAIN if unable to lock device.
5214 */
5215int pci_try_reset_function(struct pci_dev *dev)
5216{
5217 int rc;
5218
5219 if (!dev->reset_fn)
5220 return -ENOTTY;
5221
5222 if (!pci_dev_trylock(dev))
5223 return -EAGAIN;
5224
5225 pci_dev_save_and_disable(dev);
5226 rc = __pci_reset_function_locked(dev);
5227 pci_dev_restore(dev);
5228 pci_dev_unlock(dev);
5229
5230 return rc;
5231}
5232EXPORT_SYMBOL_GPL(pci_try_reset_function);
5233
5234/* Do any devices on or below this bus prevent a bus reset? */
5235static bool pci_bus_resettable(struct pci_bus *bus)
5236{
5237 struct pci_dev *dev;
5238
5240 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5241 return false;
5242
5243 list_for_each_entry(dev, &bus->devices, bus_list) {
5244 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5245		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5246 return false;
5247 }
5248
5249 return true;
5250}
5251
5252/* Lock devices from the top of the tree down */
5253static void pci_bus_lock(struct pci_bus *bus)
5254{
5255 struct pci_dev *dev;
5256
5257 list_for_each_entry(dev, &bus->devices, bus_list) {
5258 pci_dev_lock(dev);
5259 if (dev->subordinate)
5260 pci_bus_lock(dev->subordinate);
5261 }
5262}
5263
5264/* Unlock devices from the bottom of the tree up */
5265static void pci_bus_unlock(struct pci_bus *bus)
5266{
5267 struct pci_dev *dev;
5268
5269 list_for_each_entry(dev, &bus->devices, bus_list) {
5270 if (dev->subordinate)
5271 pci_bus_unlock(dev->subordinate);
5272 pci_dev_unlock(dev);
5273 }
5274}
5275
5276/* Return 1 on successful lock, 0 on contention */
5277static int pci_bus_trylock(struct pci_bus *bus)
5278{
5279 struct pci_dev *dev;
5280
5281 list_for_each_entry(dev, &bus->devices, bus_list) {
5282 if (!pci_dev_trylock(dev))
5283 goto unlock;
5284 if (dev->subordinate) {
5285 if (!pci_bus_trylock(dev->subordinate)) {
5286 pci_dev_unlock(dev);
5287 goto unlock;
5288 }
5289 }
5290 }
5291 return 1;
5292
5293unlock:
5294 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5295 if (dev->subordinate)
5296 pci_bus_unlock(dev->subordinate);
5297 pci_dev_unlock(dev);
5298 }
5299 return 0;
5300}
5301
5302/* Do any devices on or below this slot prevent a bus reset? */
5303static bool pci_slot_resettable(struct pci_slot *slot)
5304{
5305 struct pci_dev *dev;
5306
5307 if (slot->bus->self &&
5308 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5309 return false;
5310
5311 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5312 if (!dev->slot || dev->slot != slot)
5313 continue;
5314 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5315		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5316 return false;
5317 }
5318
5319 return true;
5320}
5321
5322/* Lock devices from the top of the tree down */
5323static void pci_slot_lock(struct pci_slot *slot)
5324{
5325 struct pci_dev *dev;
5326
5327 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5328 if (!dev->slot || dev->slot != slot)
5329 continue;
5330 pci_dev_lock(dev);
5331 if (dev->subordinate)
5332 pci_bus_lock(dev->subordinate);
5333 }
5334}
5335
5336/* Unlock devices from the bottom of the tree up */
5337static void pci_slot_unlock(struct pci_slot *slot)
5338{
5339 struct pci_dev *dev;
5340
5341 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5342 if (!dev->slot || dev->slot != slot)
5343 continue;
5344 if (dev->subordinate)
5345 pci_bus_unlock(dev->subordinate);
5346 pci_dev_unlock(dev);
5347 }
5348}
5349
5350/* Return 1 on successful lock, 0 on contention */
5351static int pci_slot_trylock(struct pci_slot *slot)
5352{
5353 struct pci_dev *dev;
5354
5355 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5356 if (!dev->slot || dev->slot != slot)
5357 continue;
5358 if (!pci_dev_trylock(dev))
5359 goto unlock;
5360 if (dev->subordinate) {
5361 if (!pci_bus_trylock(dev->subordinate)) {
5362 pci_dev_unlock(dev);
5363 goto unlock;
5364 }
5365 }
5366 }
5367 return 1;
5368
5369unlock:
5370 list_for_each_entry_continue_reverse(dev,
5371 &slot->bus->devices, bus_list) {
5372 if (!dev->slot || dev->slot != slot)
5373 continue;
5374 if (dev->subordinate)
5375 pci_bus_unlock(dev->subordinate);
5376 pci_dev_unlock(dev);
5377 }
5378 return 0;
5379}
5380
David Brazdil0f672f62019-12-10 10:32:29 +00005381/*
5382 * Save and disable devices from the top of the tree down while holding
5383 * the @dev mutex lock for the entire tree.
5384 */
5385static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005386{
5387 struct pci_dev *dev;
5388
5389 list_for_each_entry(dev, &bus->devices, bus_list) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005390 pci_dev_save_and_disable(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005391 if (dev->subordinate)
David Brazdil0f672f62019-12-10 10:32:29 +00005392 pci_bus_save_and_disable_locked(dev->subordinate);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005393 }
5394}
5395
5396/*
David Brazdil0f672f62019-12-10 10:32:29 +00005397 * Restore devices from top of the tree down while holding @dev mutex lock
5398 * for the entire tree. Parent bridges need to be restored before we can
5399 * get to subordinate devices.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005400 */
David Brazdil0f672f62019-12-10 10:32:29 +00005401static void pci_bus_restore_locked(struct pci_bus *bus)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005402{
5403 struct pci_dev *dev;
5404
5405 list_for_each_entry(dev, &bus->devices, bus_list) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005406 pci_dev_restore(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005407 if (dev->subordinate)
David Brazdil0f672f62019-12-10 10:32:29 +00005408 pci_bus_restore_locked(dev->subordinate);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005409 }
5410}
5411
David Brazdil0f672f62019-12-10 10:32:29 +00005412/*
5413 * Save and disable devices from the top of the tree down while holding
5414 * the @dev mutex lock for the entire tree.
5415 */
5416static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005417{
5418 struct pci_dev *dev;
5419
5420 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5421 if (!dev->slot || dev->slot != slot)
5422 continue;
5423 pci_dev_save_and_disable(dev);
5424 if (dev->subordinate)
David Brazdil0f672f62019-12-10 10:32:29 +00005425 pci_bus_save_and_disable_locked(dev->subordinate);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005426 }
5427}
5428
5429/*
David Brazdil0f672f62019-12-10 10:32:29 +00005430 * Restore devices from top of the tree down while holding @dev mutex lock
5431 * for the entire tree. Parent bridges need to be restored before we can
5432 * get to subordinate devices.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005433 */
David Brazdil0f672f62019-12-10 10:32:29 +00005434static void pci_slot_restore_locked(struct pci_slot *slot)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005435{
5436 struct pci_dev *dev;
5437
5438 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5439 if (!dev->slot || dev->slot != slot)
5440 continue;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005441 pci_dev_restore(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005442 if (dev->subordinate)
David Brazdil0f672f62019-12-10 10:32:29 +00005443 pci_bus_restore_locked(dev->subordinate);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005444 }
5445}
5446
5447static int pci_slot_reset(struct pci_slot *slot, int probe)
5448{
5449 int rc;
5450
5451	if (!slot || !pci_slot_resettable(slot))
5452 return -ENOTTY;
5453
5454 if (!probe)
5455 pci_slot_lock(slot);
5456
5457 might_sleep();
5458
5459 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5460
5461 if (!probe)
5462 pci_slot_unlock(slot);
5463
5464 return rc;
5465}
5466
5467/**
5468 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5469 * @slot: PCI slot to probe
5470 *
5471 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5472 */
5473int pci_probe_reset_slot(struct pci_slot *slot)
5474{
5475 return pci_slot_reset(slot, 1);
5476}
5477EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5478
5479/**
5480 * __pci_reset_slot - Try to reset a PCI slot
5481 * @slot: PCI slot to reset
5482 *
5483 * A PCI bus may host multiple slots, and each slot may support a reset
5484 * mechanism independent of other slots; some slots may support slot
5485 * power control, for instance. In a 1:1 bus-to-slot architecture, this
5486 * function may wrap the bus reset to avoid spurious slot-related events
5487 * such as hotplug. Generally a slot reset should be attempted before a
5488 * bus reset. All of the functions of the slot and any subordinate buses
5489 * behind the slot are reset, and the PCI config space of all devices in
5490 * the slot and behind the slot is saved before and restored after reset.
5491 *
5492 * Same as above, except return -EAGAIN if the slot cannot be locked.
5493 */
5494static int __pci_reset_slot(struct pci_slot *slot)
5495{
5496 int rc;
5497
5498 rc = pci_slot_reset(slot, 1);
5499 if (rc)
5500 return rc;
5501
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005502 if (pci_slot_trylock(slot)) {
David Brazdil0f672f62019-12-10 10:32:29 +00005503 pci_slot_save_and_disable_locked(slot);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005504 might_sleep();
5505 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
David Brazdil0f672f62019-12-10 10:32:29 +00005506 pci_slot_restore_locked(slot);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005507 pci_slot_unlock(slot);
5508 } else
5509 rc = -EAGAIN;
5510
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005511 return rc;
5512}
5513
5514static int pci_bus_reset(struct pci_bus *bus, int probe)
5515{
5516 int ret;
5517
5518	if (!bus->self || !pci_bus_resettable(bus))
5519 return -ENOTTY;
5520
5521 if (probe)
5522 return 0;
5523
5524 pci_bus_lock(bus);
5525
5526 might_sleep();
5527
5528 ret = pci_bridge_secondary_bus_reset(bus->self);
5529
5530 pci_bus_unlock(bus);
5531
5532 return ret;
5533}
5534
5535/**
David Brazdil0f672f62019-12-10 10:32:29 +00005536 * pci_bus_error_reset - reset the bridge's subordinate bus
5537 * @bridge: The parent device that connects to the bus to reset
5538 *
5539 * This function will first try to reset the slots on this bus if the method is
5540 * available. If slot reset fails or is not available, this will fall back to a
5541 * secondary bus reset.
5542 */
5543int pci_bus_error_reset(struct pci_dev *bridge)
5544{
5545 struct pci_bus *bus = bridge->subordinate;
5546 struct pci_slot *slot;
5547
5548 if (!bus)
5549 return -ENOTTY;
5550
5551 mutex_lock(&pci_slot_mutex);
5552 if (list_empty(&bus->slots))
5553 goto bus_reset;
5554
5555 list_for_each_entry(slot, &bus->slots, list)
5556 if (pci_probe_reset_slot(slot))
5557 goto bus_reset;
5558
5559 list_for_each_entry(slot, &bus->slots, list)
5560 if (pci_slot_reset(slot, 0))
5561 goto bus_reset;
5562
5563 mutex_unlock(&pci_slot_mutex);
5564 return 0;
5565bus_reset:
5566 mutex_unlock(&pci_slot_mutex);
5567 return pci_bus_reset(bridge->subordinate, 0);
5568}
5569
5570/**
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005571 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5572 * @bus: PCI bus to probe
5573 *
5574 * Return 0 if bus can be reset, negative if a bus reset is not supported.
5575 */
5576int pci_probe_reset_bus(struct pci_bus *bus)
5577{
5578 return pci_bus_reset(bus, 1);
5579}
5580EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5581
5582/**
5583 * __pci_reset_bus - Try to reset a PCI bus
5584 * @bus: top level PCI bus to reset
5585 *
5586 * Same as above, except return -EAGAIN if the bus cannot be locked.
5587 */
5588static int __pci_reset_bus(struct pci_bus *bus)
5589{
5590 int rc;
5591
5592 rc = pci_bus_reset(bus, 1);
5593 if (rc)
5594 return rc;
5595
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005596 if (pci_bus_trylock(bus)) {
David Brazdil0f672f62019-12-10 10:32:29 +00005597 pci_bus_save_and_disable_locked(bus);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005598 might_sleep();
5599 rc = pci_bridge_secondary_bus_reset(bus->self);
David Brazdil0f672f62019-12-10 10:32:29 +00005600 pci_bus_restore_locked(bus);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005601 pci_bus_unlock(bus);
5602 } else
5603 rc = -EAGAIN;
5604
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005605 return rc;
5606}
5607
5608/**
5609 * pci_reset_bus - Try to reset a PCI bus
5610 * @pdev: top level PCI device to reset via slot/bus
5611 *
5612 * Same as above, except return -EAGAIN if the bus cannot be locked.
5613 */
5614int pci_reset_bus(struct pci_dev *pdev)
5615{
5616 return (!pci_probe_reset_slot(pdev->slot)) ?
5617 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5618}
5619EXPORT_SYMBOL_GPL(pci_reset_bus);
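
/*
 * Illustrative sketch: device-assignment users (vfio-like code) fall back
 * to resetting everything below the slot/bus when no single-function
 * reset works.  "mydev" is an assumed bound device; pci_reset_bus()
 * prefers a slot reset when one is available.
 */
static int __maybe_unused mydrv_reset_topology(struct pci_dev *mydev)
{
	int rc = pci_reset_bus(mydev);

	if (rc == -EAGAIN)
		pci_warn(mydev, "slot/bus busy, reset not attempted\n");

	return rc;
}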
5620
5621/**
5622 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5623 * @dev: PCI device to query
5624 *
David Brazdil0f672f62019-12-10 10:32:29 +00005625 * Returns mmrbc: maximum designed memory read count in bytes or
5626 * appropriate error value.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005627 */
5628int pcix_get_max_mmrbc(struct pci_dev *dev)
5629{
5630 int cap;
5631 u32 stat;
5632
5633 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5634 if (!cap)
5635 return -EINVAL;
5636
5637 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5638 return -EINVAL;
5639
5640 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5641}
5642EXPORT_SYMBOL(pcix_get_max_mmrbc);
5643
5644/**
5645 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5646 * @dev: PCI device to query
5647 *
David Brazdil0f672f62019-12-10 10:32:29 +00005648 * Returns mmrbc: maximum memory read count in bytes or appropriate error
5649 * value.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005650 */
5651int pcix_get_mmrbc(struct pci_dev *dev)
5652{
5653 int cap;
5654 u16 cmd;
5655
5656 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5657 if (!cap)
5658 return -EINVAL;
5659
5660 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5661 return -EINVAL;
5662
5663 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5664}
5665EXPORT_SYMBOL(pcix_get_mmrbc);
5666
5667/**
5668 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5669 * @dev: PCI device to query
5670 * @mmrbc: maximum memory read count in bytes
5671 * valid values are 512, 1024, 2048, 4096
5672 *
David Brazdil0f672f62019-12-10 10:32:29 +00005673 * If possible, set the maximum memory read byte count; some bridges
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005674 * have errata that prevent this.
5675 */
5676int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5677{
5678 int cap;
5679 u32 stat, v, o;
5680 u16 cmd;
5681
5682 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5683 return -EINVAL;
5684
5685 v = ffs(mmrbc) - 10;
5686
5687 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5688 if (!cap)
5689 return -EINVAL;
5690
5691 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5692 return -EINVAL;
5693
5694 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5695 return -E2BIG;
5696
5697 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5698 return -EINVAL;
5699
5700 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5701 if (o != v) {
5702 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5703 return -EIO;
5704
5705 cmd &= ~PCI_X_CMD_MAX_READ;
5706 cmd |= v << 2;
5707 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5708 return -EIO;
5709 }
5710 return 0;
5711}
5712EXPORT_SYMBOL(pcix_set_mmrbc);
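
/*
 * Illustrative sketch: clamp a requested PCI-X read byte count to the
 * device's designed maximum before programming it.  Assumes "pdev" is a
 * PCI-X device; both helpers return negative values on error.
 */
static int __maybe_unused mydrv_tune_mmrbc(struct pci_dev *pdev, int want)
{
	int max = pcix_get_max_mmrbc(pdev);

	if (max < 0)
		return max;	/* not PCI-X, or status unreadable */

	return pcix_set_mmrbc(pdev, min(want, max));
}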
5713
5714/**
5715 * pcie_get_readrq - get PCI Express read request size
5716 * @dev: PCI device to query
5717 *
David Brazdil0f672f62019-12-10 10:32:29 +00005718 * Returns maximum memory read request in bytes or appropriate error value.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005719 */
5720int pcie_get_readrq(struct pci_dev *dev)
5721{
5722 u16 ctl;
5723
5724 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5725
5726 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5727}
5728EXPORT_SYMBOL(pcie_get_readrq);
5729
5730/**
5731 * pcie_set_readrq - set PCI Express maximum memory read request
5732 * @dev: PCI device to query
5733 * @rq: maximum memory read count in bytes
5734 * valid values are 128, 256, 512, 1024, 2048, 4096
5735 *
5736 * If possible, set the maximum memory read request size in bytes.
5737 */
5738int pcie_set_readrq(struct pci_dev *dev, int rq)
5739{
5740 u16 v;
Olivier Deprez157378f2022-04-04 15:47:50 +02005741 int ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005742
5743 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5744 return -EINVAL;
5745
5746 /*
David Brazdil0f672f62019-12-10 10:32:29 +00005747	 * If using the "performance" PCIe bus config, clamp the read request
5748	 * size to the max payload size (MPS) to keep the host bridge from
5749	 * generating requests larger than we can cope with.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005750 */
5751 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5752 int mps = pcie_get_mps(dev);
5753
5754 if (mps < rq)
5755 rq = mps;
5756 }
5757
5758 v = (ffs(rq) - 8) << 12;
5759
Olivier Deprez157378f2022-04-04 15:47:50 +02005760 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005761 PCI_EXP_DEVCTL_READRQ, v);
Olivier Deprez157378f2022-04-04 15:47:50 +02005762
5763 return pcibios_err_to_errno(ret);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005764}
5765EXPORT_SYMBOL(pcie_set_readrq);
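
/*
 * Illustrative sketch: a driver that issues large DMA reads may ask for a
 * bigger read request size and fall back quietly if the value is refused.
 * The 4096-byte target is an assumption for the example.
 */
static void __maybe_unused mydrv_bump_readrq(struct pci_dev *pdev)
{
	if (pcie_set_readrq(pdev, 4096))
		pci_dbg(pdev, "keeping default MRRS of %d bytes\n",
			pcie_get_readrq(pdev));
}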
5766
5767/**
5768 * pcie_get_mps - get PCI Express maximum payload size
5769 * @dev: PCI device to query
5770 *
5771 * Returns maximum payload size in bytes
5772 */
5773int pcie_get_mps(struct pci_dev *dev)
5774{
5775 u16 ctl;
5776
5777 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5778
5779 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5780}
5781EXPORT_SYMBOL(pcie_get_mps);
5782
5783/**
5784 * pcie_set_mps - set PCI Express maximum payload size
5785 * @dev: PCI device to query
5786 * @mps: maximum payload size in bytes
5787 * valid values are 128, 256, 512, 1024, 2048, 4096
5788 *
5789 * If possible, set the maximum payload size in bytes.
5790 */
5791int pcie_set_mps(struct pci_dev *dev, int mps)
5792{
5793 u16 v;
Olivier Deprez157378f2022-04-04 15:47:50 +02005794 int ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005795
5796 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5797 return -EINVAL;
5798
5799 v = ffs(mps) - 8;
5800 if (v > dev->pcie_mpss)
5801 return -EINVAL;
5802 v <<= 5;
5803
Olivier Deprez157378f2022-04-04 15:47:50 +02005804 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005805 PCI_EXP_DEVCTL_PAYLOAD, v);
Olivier Deprez157378f2022-04-04 15:47:50 +02005806
5807 return pcibios_err_to_errno(ret);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005808}
5809EXPORT_SYMBOL(pcie_set_mps);
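
/*
 * Illustrative sketch: MPS is normally negotiated by the PCI core, so a
 * driver would usually only read it, e.g. to size DMA bursts, rather
 * than call pcie_set_mps() itself.
 */
static void __maybe_unused mydrv_log_mps(struct pci_dev *pdev)
{
	pci_info(pdev, "MPS %d bytes, MRRS %d bytes\n",
		 pcie_get_mps(pdev), pcie_get_readrq(pdev));
}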
5810
5811/**
5812 * pcie_bandwidth_available - determine minimum link settings of a PCIe
5813 * device and its bandwidth limitation
5814 * @dev: PCI device to query
5815 * @limiting_dev: storage for device causing the bandwidth limitation
5816 * @speed: storage for speed of limiting device
5817 * @width: storage for width of limiting device
5818 *
5819 * Walk up the PCI device chain and find the point where the minimum
5820 * bandwidth is available. Return the bandwidth available there and (if
5821 * limiting_dev, speed, and width pointers are supplied) information about
5822 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
5823 * raw bandwidth.
5824 */
5825u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5826 enum pci_bus_speed *speed,
5827 enum pcie_link_width *width)
5828{
5829 u16 lnksta;
5830 enum pci_bus_speed next_speed;
5831 enum pcie_link_width next_width;
5832 u32 bw, next_bw;
5833
5834 if (speed)
5835 *speed = PCI_SPEED_UNKNOWN;
5836 if (width)
5837 *width = PCIE_LNK_WIDTH_UNKNOWN;
5838
5839 bw = 0;
5840
5841 while (dev) {
5842 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5843
5844 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5845 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5846 PCI_EXP_LNKSTA_NLW_SHIFT;
5847
5848 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5849
5850 /* Check if current device limits the total bandwidth */
5851 if (!bw || next_bw <= bw) {
5852 bw = next_bw;
5853
5854 if (limiting_dev)
5855 *limiting_dev = dev;
5856 if (speed)
5857 *speed = next_speed;
5858 if (width)
5859 *width = next_width;
5860 }
5861
5862 dev = pci_upstream_bridge(dev);
5863 }
5864
5865 return bw;
5866}
5867EXPORT_SYMBOL(pcie_bandwidth_available);
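
/*
 * Illustrative sketch: report how much bandwidth actually reaches the
 * device and which upstream link limits it.  The speed/width pointers
 * are optional, so NULL is passed for them here.
 */
static void __maybe_unused mydrv_check_bandwidth(struct pci_dev *pdev)
{
	struct pci_dev *limit = NULL;
	u32 bw = pcie_bandwidth_available(pdev, &limit, NULL, NULL);

	pci_info(pdev, "%u Mb/s available, limited at %s\n",
		 bw, limit ? pci_name(limit) : "<unknown>");
}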
5868
5869/**
5870 * pcie_get_speed_cap - query for the PCI device's link speed capability
5871 * @dev: PCI device to query
5872 *
5873 * Query the PCI device speed capability. Return the maximum link speed
5874 * supported by the device.
5875 */
5876enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5877{
5878 u32 lnkcap2, lnkcap;
5879
5880 /*
5881 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
5882 * implementation note there recommends using the Supported Link
5883 * Speeds Vector in Link Capabilities 2 when supported.
5884 *
5885 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
5886 * should use the Supported Link Speeds field in Link Capabilities,
5887 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
5888 */
5889 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
Olivier Deprez157378f2022-04-04 15:47:50 +02005890
5891 /* PCIe r3.0-compliant */
5892 if (lnkcap2)
5893 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005894
5895 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5896 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5897 return PCIE_SPEED_5_0GT;
5898 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5899 return PCIE_SPEED_2_5GT;
5900
5901 return PCI_SPEED_UNKNOWN;
5902}
5903EXPORT_SYMBOL(pcie_get_speed_cap);
5904
5905/**
5906 * pcie_get_width_cap - query for the PCI device's link width capability
5907 * @dev: PCI device to query
5908 *
5909 * Query the PCI device width capability. Return the maximum link width
5910 * supported by the device.
5911 */
5912enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5913{
5914 u32 lnkcap;
5915
5916 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5917 if (lnkcap)
5918 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5919
5920 return PCIE_LNK_WIDTH_UNKNOWN;
5921}
5922EXPORT_SYMBOL(pcie_get_width_cap);
5923
5924/**
5925 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5926 * @dev: PCI device
5927 * @speed: storage for link speed
5928 * @width: storage for link width
5929 *
5930 * Calculate a PCI device's link bandwidth by querying for its link speed
5931 * and width, multiplying them, and applying encoding overhead. The result
5932 * is in Mb/s, i.e., megabits/second of raw bandwidth.
5933 */
5934u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5935 enum pcie_link_width *width)
5936{
5937 *speed = pcie_get_speed_cap(dev);
5938 *width = pcie_get_width_cap(dev);
5939
5940 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5941 return 0;
5942
5943 return *width * PCIE_SPEED2MBS_ENC(*speed);
5944}
5945
5946/**
5947 * __pcie_print_link_status - Report the PCI device's link speed and width
5948 * @dev: PCI device to query
5949 * @verbose: Print info even when enough bandwidth is available
5950 *
5951 * If the available bandwidth at the device is less than the device is
5952 * capable of, report the device's maximum possible bandwidth and the
5953 * upstream link that limits its performance. If @verbose, always print
5954 * the available bandwidth, even if the device isn't constrained.
5955 */
5956void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5957{
5958 enum pcie_link_width width, width_cap;
5959 enum pci_bus_speed speed, speed_cap;
5960 struct pci_dev *limiting_dev = NULL;
5961 u32 bw_avail, bw_cap;
5962
5963 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5964 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5965
5966 if (bw_avail >= bw_cap && verbose)
5967 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5968 bw_cap / 1000, bw_cap % 1000,
Olivier Deprez157378f2022-04-04 15:47:50 +02005969 pci_speed_string(speed_cap), width_cap);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005970 else if (bw_avail < bw_cap)
5971 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5972 bw_avail / 1000, bw_avail % 1000,
Olivier Deprez157378f2022-04-04 15:47:50 +02005973 pci_speed_string(speed), width,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005974 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5975 bw_cap / 1000, bw_cap % 1000,
Olivier Deprez157378f2022-04-04 15:47:50 +02005976 pci_speed_string(speed_cap), width_cap);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005977}
5978
5979/**
5980 * pcie_print_link_status - Report the PCI device's link speed and width
5981 * @dev: PCI device to query
5982 *
5983 * Report the available bandwidth at the device.
5984 */
5985void pcie_print_link_status(struct pci_dev *dev)
5986{
5987 __pcie_print_link_status(dev, true);
5988}
5989EXPORT_SYMBOL(pcie_print_link_status);
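
/*
 * Usage note (not from this file): bandwidth-hungry drivers such as NICs
 * typically call pcie_print_link_status() once from their probe routine
 * so users can spot, say, a x16 device sitting behind a x8 link.
 */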
5990
5991/**
5992 * pci_select_bars - Make BAR mask from the type of resource
5993 * @dev: the PCI device for which BAR mask is made
5994 * @flags: resource type mask to be selected
5995 *
5996 * This helper routine makes a BAR mask from the type of resource.
5997 */
5998int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5999{
6000	int i, bars = 0;

6001 for (i = 0; i < PCI_NUM_RESOURCES; i++)
6002 if (pci_resource_flags(dev, i) & flags)
6003 bars |= (1 << i);
6004 return bars;
6005}
6006EXPORT_SYMBOL(pci_select_bars);
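
/*
 * Illustrative sketch: pair pci_select_bars() with
 * pci_request_selected_regions() to claim only the memory BARs in one
 * call.  The "mydrv" region name is an assumption.
 */
static int __maybe_unused mydrv_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "mydrv");
}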
6007
6008/* Some architectures require additional programming to enable VGA */
6009static arch_set_vga_state_t arch_set_vga_state;
6010
6011void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6012{
6013 arch_set_vga_state = func; /* NULL disables */
6014}
6015
6016static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6017 unsigned int command_bits, u32 flags)
6018{
6019 if (arch_set_vga_state)
6020 return arch_set_vga_state(dev, decode, command_bits,
6021 flags);
6022 return 0;
6023}
6024
6025/**
6026 * pci_set_vga_state - set VGA decode state on device and parents if requested
6027 * @dev: the PCI device
6028 * @decode: true = enable decoding, false = disable decoding
6029 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6030 * @flags: traverse ancestors and change bridges
6031 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6032 */
6033int pci_set_vga_state(struct pci_dev *dev, bool decode,
6034 unsigned int command_bits, u32 flags)
6035{
6036 struct pci_bus *bus;
6037 struct pci_dev *bridge;
6038 u16 cmd;
6039 int rc;
6040
6041 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6042
6043 /* ARCH specific VGA enables */
6044 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6045 if (rc)
6046 return rc;
6047
6048 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6049 pci_read_config_word(dev, PCI_COMMAND, &cmd);
Olivier Deprez157378f2022-04-04 15:47:50 +02006050 if (decode)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006051 cmd |= command_bits;
6052 else
6053 cmd &= ~command_bits;
6054 pci_write_config_word(dev, PCI_COMMAND, cmd);
6055 }
6056
6057 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6058 return 0;
6059
6060 bus = dev->bus;
6061 while (bus) {
6062 bridge = bus->self;
6063 if (bridge) {
6064 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6065 &cmd);
Olivier Deprez157378f2022-04-04 15:47:50 +02006066 if (decode)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006067 cmd |= PCI_BRIDGE_CTL_VGA;
6068 else
6069 cmd &= ~PCI_BRIDGE_CTL_VGA;
6070 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6071 cmd);
6072 }
6073 bus = bus->parent;
6074 }
6075 return 0;
6076}
6077
Olivier Deprez0e641232021-09-23 10:07:05 +02006078#ifdef CONFIG_ACPI
6079bool pci_pr3_present(struct pci_dev *pdev)
6080{
6081 struct acpi_device *adev;
6082
6083 if (acpi_disabled)
6084 return false;
6085
6086 adev = ACPI_COMPANION(&pdev->dev);
6087 if (!adev)
6088 return false;
6089
6090 return adev->power.flags.power_resources &&
6091 acpi_has_method(adev->handle, "_PR3");
6092}
6093EXPORT_SYMBOL_GPL(pci_pr3_present);
6094#endif
6095
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006096/**
6097 * pci_add_dma_alias - Add a DMA devfn alias for a device
6098 * @dev: the PCI device for which alias is added
Olivier Deprez0e641232021-09-23 10:07:05 +02006099 * @devfn_from: alias slot and function
6100 * @nr_devfns: number of subsequent devfns to alias
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006101 *
6102 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6103 * which is used to program permissible bus-devfn source addresses for DMA
6104 * requests in an IOMMU. These aliases factor into IOMMU group creation
6105 * and are useful for devices generating DMA requests beyond or different
6106 * from their logical bus-devfn. Examples include device quirks where the
6107 * device simply uses the wrong devfn, as well as non-transparent bridges
6108 * where the alias may be a proxy for devices in another domain.
6109 *
6110 * IOMMU group creation is performed during device discovery or addition,
6111 * prior to any potential DMA mapping and therefore prior to driver probing
6112 * (especially for userspace assigned devices where IOMMU group definition
6113 * cannot be left as a userspace activity). DMA aliases should therefore
6114 * be configured via quirks, such as the PCI fixup header quirk.
6115 */
Olivier Deprez0e641232021-09-23 10:07:05 +02006116void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006117{
Olivier Deprez0e641232021-09-23 10:07:05 +02006118 int devfn_to;
6119
6120 nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
6121 devfn_to = devfn_from + nr_devfns - 1;
6122
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006123 if (!dev->dma_alias_mask)
Olivier Deprez0e641232021-09-23 10:07:05 +02006124 dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006125 if (!dev->dma_alias_mask) {
6126 pci_warn(dev, "Unable to allocate DMA alias mask\n");
6127 return;
6128 }
6129
Olivier Deprez0e641232021-09-23 10:07:05 +02006130 bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6131
6132 if (nr_devfns == 1)
6133 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6134 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6135 else if (nr_devfns > 1)
6136 pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6137 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6138 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006139}
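
/*
 * Illustrative sketch: DMA aliases are normally added from a header fixup
 * quirk so the alias exists before IOMMU groups are built.  The IDs and
 * devfn below are made up; the fixup registration is left commented out.
 */
static void __maybe_unused quirk_mydev_dma_alias(struct pci_dev *pdev)
{
	/* Device also emits DMA as function 1 of its own slot */
	pci_add_dma_alias(pdev, PCI_DEVFN(PCI_SLOT(pdev->devfn), 1), 1);
}
/* DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_mydev_dma_alias); */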
6140
6141bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6142{
6143 return (dev1->dma_alias_mask &&
6144 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6145 (dev2->dma_alias_mask &&
Olivier Deprez157378f2022-04-04 15:47:50 +02006146 test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6147 pci_real_dma_dev(dev1) == dev2 ||
6148 pci_real_dma_dev(dev2) == dev1;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006149}
6150
6151bool pci_device_is_present(struct pci_dev *pdev)
6152{
6153 u32 v;
6154
6155 if (pci_dev_is_disconnected(pdev))
6156 return false;
6157 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6158}
6159EXPORT_SYMBOL_GPL(pci_device_is_present);
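
/*
 * Illustrative sketch: a hotplug-aware driver can check for surprise
 * removal before a lengthy teardown to avoid pointless MMIO to hardware
 * that is no longer there.
 */
static void __maybe_unused mydrv_teardown(struct pci_dev *pdev)
{
	if (!pci_device_is_present(pdev)) {
		pci_dbg(pdev, "device gone, skipping hardware quiesce\n");
		return;
	}
	/* ... orderly hardware shutdown would go here ... */
}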
6160
6161void pci_ignore_hotplug(struct pci_dev *dev)
6162{
6163 struct pci_dev *bridge = dev->bus->self;
6164
6165 dev->ignore_hotplug = 1;
6166 /* Propagate the "ignore hotplug" setting to the parent bridge. */
6167 if (bridge)
6168 bridge->ignore_hotplug = 1;
6169}
6170EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6171
Olivier Deprez157378f2022-04-04 15:47:50 +02006172/**
6173 * pci_real_dma_dev - Get PCI DMA device for PCI device
6174 * @dev: the PCI device that may have a PCI DMA alias
6175 *
6176 * Permits the platform to provide architecture-specific functionality to
6177 * devices needing to alias DMA to another PCI device on another PCI bus. If
6178 * the PCI device is on the same bus, it is recommended to use
6179 * pci_add_dma_alias(). This is the default implementation. Architecture
6180 * implementations can override this.
6181 */
6182struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6183{
6184 return dev;
6185}
6186
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006187resource_size_t __weak pcibios_default_alignment(void)
6188{
6189 return 0;
6190}
6191
David Brazdil0f672f62019-12-10 10:32:29 +00006192/*
6193 * Arches that don't want to expose struct resource to userland as-is in
6194 * sysfs and /proc can implement their own pci_resource_to_user().
6195 */
6196void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6197 const struct resource *rsrc,
6198 resource_size_t *start, resource_size_t *end)
6199{
6200 *start = rsrc->start;
6201 *end = rsrc->end;
6202}
6203
6204static char *resource_alignment_param;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006205static DEFINE_SPINLOCK(resource_alignment_lock);
6206
6207/**
6208 * pci_specified_resource_alignment - get resource alignment specified by user.
6209 * @dev: the PCI device to get
6210 * @resize: whether or not to change resources' size when reassigning alignment
6211 *
6212 * RETURNS: Resource alignment if it is specified.
6213 * Zero if it is not specified.
6214 */
6215static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6216 bool *resize)
6217{
6218 int align_order, count;
6219 resource_size_t align = pcibios_default_alignment();
6220 const char *p;
6221 int ret;
6222
6223 spin_lock(&resource_alignment_lock);
6224 p = resource_alignment_param;
David Brazdil0f672f62019-12-10 10:32:29 +00006225 if (!p || !*p)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006226 goto out;
6227 if (pci_has_flag(PCI_PROBE_ONLY)) {
6228 align = 0;
6229 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6230 goto out;
6231 }
6232
6233 while (*p) {
6234 count = 0;
6235 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
Olivier Deprez0e641232021-09-23 10:07:05 +02006236 p[count] == '@') {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006237 p += count + 1;
Olivier Deprez0e641232021-09-23 10:07:05 +02006238 if (align_order > 63) {
6239 pr_err("PCI: Invalid requested alignment (order %d)\n",
6240 align_order);
6241 align_order = PAGE_SHIFT;
6242 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006243 } else {
Olivier Deprez0e641232021-09-23 10:07:05 +02006244 align_order = PAGE_SHIFT;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006245 }
6246
6247 ret = pci_dev_str_match(dev, p, &p);
6248 if (ret == 1) {
6249 *resize = true;
Olivier Deprez0e641232021-09-23 10:07:05 +02006250 align = 1ULL << align_order;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006251 break;
6252 } else if (ret < 0) {
6253 pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6254 p);
6255 break;
6256 }
6257
6258 if (*p != ';' && *p != ',') {
6259 /* End of param or invalid format */
6260 break;
6261 }
6262 p++;
6263 }
6264out:
6265 spin_unlock(&resource_alignment_lock);
6266 return align;
6267}
6268
6269static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6270 resource_size_t align, bool resize)
6271{
6272 struct resource *r = &dev->resource[bar];
6273 resource_size_t size;
6274
6275 if (!(r->flags & IORESOURCE_MEM))
6276 return;
6277
6278 if (r->flags & IORESOURCE_PCI_FIXED) {
6279 pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6280 bar, r, (unsigned long long)align);
6281 return;
6282 }
6283
6284 size = resource_size(r);
6285 if (size >= align)
6286 return;
6287
6288 /*
6289 * Increase the alignment of the resource. There are two ways we
6290 * can do this:
6291 *
6292 * 1) Increase the size of the resource. BARs are aligned on their
6293 * size, so when we reallocate space for this resource, we'll
6294 * allocate it with the larger alignment. This also prevents
6295 * assignment of any other BARs inside the alignment region, so
6296 * if we're requesting page alignment, this means no other BARs
6297 * will share the page.
6298 *
6299 * The disadvantage is that this makes the resource larger than
6300 * the hardware BAR, which may break drivers that compute things
6301 * based on the resource size, e.g., to find registers at a
6302 * fixed offset before the end of the BAR.
6303 *
6304 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6305 * set r->start to the desired alignment. By itself this
6306 * doesn't prevent other BARs being put inside the alignment
6307 * region, but if we realign *every* resource of every device in
6308 * the system, none of them will share an alignment region.
6309 *
6310 * When the user has requested alignment for only some devices via
6311 * the "pci=resource_alignment" argument, "resize" is true and we
6312 * use the first method. Otherwise we assume we're aligning all
6313 * devices and we use the second.
6314 */
6315
6316 pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6317 bar, r, (unsigned long long)align);
6318
6319 if (resize) {
6320 r->start = 0;
6321 r->end = align - 1;
6322 } else {
6323 r->flags &= ~IORESOURCE_SIZEALIGN;
6324 r->flags |= IORESOURCE_STARTALIGN;
6325 r->start = align;
6326 r->end = r->start + size - 1;
6327 }
6328 r->flags |= IORESOURCE_UNSET;
6329}
6330
6331/*
6332 * This function disables memory decoding and releases memory resources
6333 * of the device specified by the kernel's boot parameter
6334 * 'pci=resource_alignment='. It also rounds up the size to the
6335 * specified alignment. Later on, the kernel will assign page-aligned
6336 * memory resources back to the device.
6337 */
6338void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6339{
6340 int i;
6341 struct resource *r;
6342 resource_size_t align;
6343 u16 command;
6344 bool resize = false;
6345
6346 /*
6347 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6348 * 3.4.1.11. Their resources are allocated from the space
6349 * described by the VF BARx register in the PF's SR-IOV capability.
6350 * We can't influence their alignment here.
6351 */
6352 if (dev->is_virtfn)
6353 return;
6354
6355 /* check if specified PCI is target device to reassign */
6356 align = pci_specified_resource_alignment(dev, &resize);
6357 if (!align)
6358 return;
6359
6360 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6361 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6362 pci_warn(dev, "Can't reassign resources to host bridge\n");
6363 return;
6364 }
6365
6366 pci_read_config_word(dev, PCI_COMMAND, &command);
6367 command &= ~PCI_COMMAND_MEMORY;
6368 pci_write_config_word(dev, PCI_COMMAND, command);
6369
6370 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6371 pci_request_resource_alignment(dev, i, align, resize);
6372
6373 /*
6374	 * We need to disable the bridge's resource windows so the kernel
6375	 * can reassign new windows later on.
6377 */
David Brazdil0f672f62019-12-10 10:32:29 +00006378 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006379 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6380 r = &dev->resource[i];
6381 if (!(r->flags & IORESOURCE_MEM))
6382 continue;
6383 r->flags |= IORESOURCE_UNSET;
6384 r->end = resource_size(r) - 1;
6385 r->start = 0;
6386 }
6387 pci_disable_bridge_window(dev);
6388 }
6389}
6390
David Brazdil0f672f62019-12-10 10:32:29 +00006391static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006392{
David Brazdil0f672f62019-12-10 10:32:29 +00006393 size_t count = 0;
6394
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006395 spin_lock(&resource_alignment_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00006396 if (resource_alignment_param)
Olivier Deprez157378f2022-04-04 15:47:50 +02006397 count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006398 spin_unlock(&resource_alignment_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00006399
6400 /*
6401 * When set by the command line, resource_alignment_param will not
6402 * have a trailing line feed, which is ugly. So conditionally add
6403 * it here.
6404 */
6405 if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) {
6406 buf[count - 1] = '\n';
6407 buf[count++] = 0;
6408 }
6409
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006410 return count;
6411}
6412
David Brazdil0f672f62019-12-10 10:32:29 +00006413static ssize_t resource_alignment_store(struct bus_type *bus,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006414 const char *buf, size_t count)
6415{
David Brazdil0f672f62019-12-10 10:32:29 +00006416 char *param = kstrndup(buf, count, GFP_KERNEL);
6417
6418 if (!param)
6419 return -ENOMEM;
6420
6421 spin_lock(&resource_alignment_lock);
6422 kfree(resource_alignment_param);
6423 resource_alignment_param = param;
6424 spin_unlock(&resource_alignment_lock);
6425 return count;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006426}
6427
David Brazdil0f672f62019-12-10 10:32:29 +00006428static BUS_ATTR_RW(resource_alignment);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006429
6430static int __init pci_resource_alignment_sysfs_init(void)
6431{
6432 return bus_create_file(&pci_bus_type,
6433 &bus_attr_resource_alignment);
6434}
6435late_initcall(pci_resource_alignment_sysfs_init);
6436
6437static void pci_no_domains(void)
6438{
6439#ifdef CONFIG_PCI_DOMAINS
6440 pci_domains_supported = 0;
6441#endif
6442}
6443
6444#ifdef CONFIG_PCI_DOMAINS_GENERIC
6445static atomic_t __domain_nr = ATOMIC_INIT(-1);
6446
6447static int pci_get_new_domain_nr(void)
6448{
6449 return atomic_inc_return(&__domain_nr);
6450}
6451
6452static int of_pci_bus_find_domain_nr(struct device *parent)
6453{
6454 static int use_dt_domains = -1;
6455 int domain = -1;
6456
6457 if (parent)
6458 domain = of_get_pci_domain_nr(parent->of_node);
David Brazdil0f672f62019-12-10 10:32:29 +00006459
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006460 /*
6461 * Check DT domain and use_dt_domains values.
6462 *
6463 * If DT domain property is valid (domain >= 0) and
6464 * use_dt_domains != 0, the DT assignment is valid since this means
6465 * we have not previously allocated a domain number by using
6466 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6467 * 1, to indicate that we have just assigned a domain number from
6468 * DT.
6469 *
6470 * If DT domain property value is not valid (i.e., domain < 0), and we
6471 * have not previously assigned a domain number from DT
6472 * (use_dt_domains != 1) we should assign a domain number by
6473 * using the:
6474 *
6475 * pci_get_new_domain_nr()
6476 *
6477 * API and update the use_dt_domains value to keep track of method we
6478 * are using to assign domain numbers (use_dt_domains = 0).
6479 *
6480 * All other combinations imply we have a platform that is trying
6481 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
6482 * which is a recipe for domain mishandling and it is prevented by
6483 * invalidating the domain value (domain = -1) and printing a
6484 * corresponding error.
6485 */
6486 if (domain >= 0 && use_dt_domains) {
6487 use_dt_domains = 1;
6488 } else if (domain < 0 && use_dt_domains != 1) {
6489 use_dt_domains = 0;
6490 domain = pci_get_new_domain_nr();
6491 } else {
6492 if (parent)
6493 pr_err("Node %pOF has ", parent->of_node);
6494 pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6495 domain = -1;
6496 }
6497
6498 return domain;
6499}
6500
6501int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6502{
6503 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6504 acpi_pci_bus_find_domain_nr(bus);
6505}
6506#endif
6507
6508/**
6509 * pci_ext_cfg_avail - can we access extended PCI config space?
6510 *
6511 * Returns 1 if we can access PCI extended config space (offsets
6512 * greater than 0xff). This is the default implementation. Architecture
6513 * implementations can override this.
6514 */
6515int __weak pci_ext_cfg_avail(void)
6516{
6517 return 1;
6518}
6519
6520void __weak pci_fixup_cardbus(struct pci_bus *bus)
6521{
6522}
6523EXPORT_SYMBOL(pci_fixup_cardbus);
6524
6525static int __init pci_setup(char *str)
6526{
6527 while (str) {
6528 char *k = strchr(str, ',');
6529 if (k)
6530 *k++ = 0;
6531 if (*str && (str = pcibios_setup(str)) && *str) {
6532 if (!strcmp(str, "nomsi")) {
6533 pci_no_msi();
6534 } else if (!strncmp(str, "noats", 5)) {
6535 pr_info("PCIe: ATS is disabled\n");
6536 pcie_ats_disabled = true;
6537 } else if (!strcmp(str, "noaer")) {
6538 pci_no_aer();
6539 } else if (!strcmp(str, "earlydump")) {
6540 pci_early_dump = true;
6541 } else if (!strncmp(str, "realloc=", 8)) {
6542 pci_realloc_get_opt(str + 8);
6543 } else if (!strncmp(str, "realloc", 7)) {
6544 pci_realloc_get_opt("on");
6545 } else if (!strcmp(str, "nodomains")) {
6546 pci_no_domains();
6547 } else if (!strncmp(str, "noari", 5)) {
6548 pcie_ari_disabled = true;
6549 } else if (!strncmp(str, "cbiosize=", 9)) {
6550 pci_cardbus_io_size = memparse(str + 9, &str);
6551 } else if (!strncmp(str, "cbmemsize=", 10)) {
6552 pci_cardbus_mem_size = memparse(str + 10, &str);
6553 } else if (!strncmp(str, "resource_alignment=", 19)) {
David Brazdil0f672f62019-12-10 10:32:29 +00006554 resource_alignment_param = str + 19;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006555 } else if (!strncmp(str, "ecrc=", 5)) {
6556 pcie_ecrc_get_policy(str + 5);
6557 } else if (!strncmp(str, "hpiosize=", 9)) {
6558 pci_hotplug_io_size = memparse(str + 9, &str);
Olivier Deprez157378f2022-04-04 15:47:50 +02006559 } else if (!strncmp(str, "hpmmiosize=", 11)) {
6560 pci_hotplug_mmio_size = memparse(str + 11, &str);
6561 } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6562 pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006563 } else if (!strncmp(str, "hpmemsize=", 10)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02006564 pci_hotplug_mmio_size = memparse(str + 10, &str);
6565 pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006566 } else if (!strncmp(str, "hpbussize=", 10)) {
6567 pci_hotplug_bus_size =
6568 simple_strtoul(str + 10, &str, 0);
6569 if (pci_hotplug_bus_size > 0xff)
6570 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6571 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6572 pcie_bus_config = PCIE_BUS_TUNE_OFF;
6573 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
6574 pcie_bus_config = PCIE_BUS_SAFE;
6575 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
6576 pcie_bus_config = PCIE_BUS_PERFORMANCE;
6577 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6578 pcie_bus_config = PCIE_BUS_PEER2PEER;
6579 } else if (!strncmp(str, "pcie_scan_all", 13)) {
6580 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6581 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
6582 disable_acs_redir_param = str + 18;
6583 } else {
David Brazdil0f672f62019-12-10 10:32:29 +00006584 pr_err("PCI: Unknown option `%s'\n", str);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006585 }
6586 }
6587 str = k;
6588 }
6589 return 0;
6590}
6591early_param("pci", pci_setup);
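
/*
 * Example command lines parsed above (values are illustrative only):
 *
 *	pci=hpmemsize=128M,hpbussize=8
 *	pci=resource_alignment=12@0000:03:00.0
 *	pci=pcie_bus_perf,noaer
 */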
David Brazdil0f672f62019-12-10 10:32:29 +00006592
6593/*
6594 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6595 * in pci_setup(), above, to point to data in the __initdata section which
6596 * will be freed after the init sequence is complete. We can't allocate memory
6597 * in pci_setup() because some architectures do not have any memory allocation
6598 * service available during an early_param() call. So we allocate memory and
6599 * copy the variable here before the init section is freed.
6600 */
6602static int __init pci_realloc_setup_params(void)
6603{
6604 resource_alignment_param = kstrdup(resource_alignment_param,
6605 GFP_KERNEL);
6606 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6607
6608 return 0;
6609}
6610pure_initcall(pci_realloc_setup_params);