// SPDX-License-Identifier: GPL-2.0
/*
 * Enable PCIe link L0s/L1 state and Clock Power Management
 *
 * Copyright (C) 2007 Intel
 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
 * Copyright (C) Shaohua Li (shaohua.li@intel.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include "../pci.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."

/* Note: those are not register definitions */
#define ASPM_STATE_L0S_UP	(1)	/* Upstream direction L0s state */
#define ASPM_STATE_L0S_DW	(2)	/* Downstream direction L0s state */
#define ASPM_STATE_L1		(4)	/* L1 state */
#define ASPM_STATE_L1_1		(8)	/* ASPM L1.1 state */
#define ASPM_STATE_L1_2		(0x10)	/* ASPM L1.2 state */
#define ASPM_STATE_L1_1_PCIPM	(0x20)	/* PCI PM L1.1 state */
#define ASPM_STATE_L1_2_PCIPM	(0x40)	/* PCI PM L1.2 state */
#define ASPM_STATE_L1_SS_PCIPM	(ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1_2_MASK	(ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1SS		(ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
				 ASPM_STATE_L1_2_MASK)
#define ASPM_STATE_L0S		(ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
#define ASPM_STATE_ALL		(ASPM_STATE_L0S | ASPM_STATE_L1 |	\
				 ASPM_STATE_L1SS)

struct aspm_latency {
	u32 l0s;			/* L0s latency (nsec) */
	u32 l1;				/* L1 latency (nsec) */
};

struct pcie_link_state {
	struct pci_dev *pdev;		/* Upstream component of the Link */
	struct pci_dev *downstream;	/* Downstream component, function 0 */
	struct pcie_link_state *root;	/* pointer to the root port link */
	struct pcie_link_state *parent;	/* pointer to the parent Link state */
	struct list_head sibling;	/* node in link_list */

	/* ASPM state */
	u32 aspm_support:7;		/* Supported ASPM state */
	u32 aspm_enabled:7;		/* Enabled ASPM state */
	u32 aspm_capable:7;		/* Capable ASPM state with latency */
	u32 aspm_default:7;		/* Default ASPM state by BIOS */
	u32 aspm_disable:7;		/* Disabled ASPM state */

	/* Clock PM state */
	u32 clkpm_capable:1;		/* Clock PM capable? */
	u32 clkpm_enabled:1;		/* Current Clock PM state */
	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */
	u32 clkpm_disable:1;		/* Clock PM disabled */

	/* Exit latencies */
	struct aspm_latency latency_up;	/* Upstream direction exit latency */
	struct aspm_latency latency_dw;	/* Downstream direction exit latency */
	/*
	 * Endpoint acceptable latencies. A pcie downstream port only
	 * has one slot under it, so at most there are 8 functions.
	 */
	struct aspm_latency acceptable[8];
};

static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);

#define POLICY_DEFAULT 0	/* BIOS default setting */
#define POLICY_PERFORMANCE 1	/* high performance */
#define POLICY_POWERSAVE 2	/* high power saving */
#define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */

#ifdef CONFIG_PCIEASPM_PERFORMANCE
static int aspm_policy = POLICY_PERFORMANCE;
#elif defined CONFIG_PCIEASPM_POWERSAVE
static int aspm_policy = POLICY_POWERSAVE;
#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
static int aspm_policy = POLICY_POWER_SUPERSAVE;
#else
static int aspm_policy;
#endif

static const char *policy_str[] = {
	[POLICY_DEFAULT] = "default",
	[POLICY_PERFORMANCE] = "performance",
	[POLICY_POWERSAVE] = "powersave",
	[POLICY_POWER_SUPERSAVE] = "powersupersave"
};

#define LINK_RETRAIN_TIMEOUT HZ

static int policy_to_aspm_state(struct pcie_link_state *link)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		/* Disable ASPM and Clock PM */
		return 0;
	case POLICY_POWERSAVE:
		/* Enable ASPM L0s/L1 */
		return (ASPM_STATE_L0S | ASPM_STATE_L1);
	case POLICY_POWER_SUPERSAVE:
		/* Enable Everything */
		return ASPM_STATE_ALL;
	case POLICY_DEFAULT:
		return link->aspm_default;
	}
	return 0;
}

static int policy_to_clkpm_state(struct pcie_link_state *link)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		/* Disable ASPM and Clock PM */
		return 0;
	case POLICY_POWERSAVE:
	case POLICY_POWER_SUPERSAVE:
		/* Enable Clock PM */
		return 1;
	case POLICY_DEFAULT:
		return link->clkpm_default;
	}
	return 0;
}

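/*
 * Set or clear PCI_EXP_LNKCTL_CLKREQ_EN on every function below the link
 * and record the resulting state, without checking whether the link is
 * actually Clock PM capable.
 */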
static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;
	u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;

	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN,
						   val);
	link->clkpm_enabled = !!enable;
}

static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
	/*
	 * Don't enable Clock PM if the link is not Clock PM capable
	 * or Clock PM is disabled
	 */
	if (!link->clkpm_capable || link->clkpm_disable)
		enable = 0;
	/* Nothing to do if the requested state equals the current state */
	if (link->clkpm_enabled == enable)
		return;
	pcie_set_clkpm_nocheck(link, enable);
}

static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
	int capable = 1, enabled = 1;
	u32 reg32;
	u16 reg16;
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;

	/* All functions should have the same cap and state, take the worst */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
		if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
			capable = 0;
			enabled = 0;
			break;
		}
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
			enabled = 0;
	}
	link->clkpm_enabled = enabled;
	link->clkpm_default = enabled;
	link->clkpm_capable = capable;
	link->clkpm_disable = blacklist ? 1 : 0;
}

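/*
 * Set the Retrain Link bit on the upstream component and poll the Link
 * Training bit.  Returns true if training completed before
 * LINK_RETRAIN_TIMEOUT expired.
 */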
static bool pcie_retrain_link(struct pcie_link_state *link)
{
	struct pci_dev *parent = link->pdev;
	unsigned long end_jiffies;
	u16 reg16;

	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	reg16 |= PCI_EXP_LNKCTL_RL;
	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
	if (parent->clear_retrain_link) {
		/*
		 * Due to an erratum in some devices the Retrain Link bit
		 * needs to be cleared again manually to allow the link
		 * training to succeed.
		 */
		reg16 &= ~PCI_EXP_LNKCTL_RL;
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
	}

	/* Wait for link training end. Break out after waiting for timeout */
	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
	do {
		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
		if (!(reg16 & PCI_EXP_LNKSTA_LT))
			break;
		msleep(1);
	} while (time_before(jiffies, end_jiffies));
	return !(reg16 & PCI_EXP_LNKSTA_LT);
}

/*
 * pcie_aspm_configure_common_clock: check if the 2 ends of a link
 * could use common clock.  If they can, configure them to use the
 * common clock.  That will reduce the ASPM state exit latency.
 */
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
	int same_clock = 1;
	u16 reg16, parent_reg, child_reg[8];
	struct pci_dev *child, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	/*
	 * All functions of a slot should have the same Slot Clock
	 * Configuration, so just check one function
	 */
	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
	BUG_ON(!pci_is_pcie(child));

	/* Check downstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Check upstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Port might be already in common clock mode */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
		bool consistent = true;

		list_for_each_entry(child, &linkbus->devices, bus_list) {
			pcie_capability_read_word(child, PCI_EXP_LNKCTL,
						  &reg16);
			if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
				consistent = false;
				break;
			}
		}
		if (consistent)
			return;
		pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
	}

	/* Configure downstream component, all functions */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		child_reg[PCI_FUNC(child->devfn)] = reg16;
		if (same_clock)
			reg16 |= PCI_EXP_LNKCTL_CCC;
		else
			reg16 &= ~PCI_EXP_LNKCTL_CCC;
		pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
	}

	/* Configure upstream component */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	parent_reg = reg16;
	if (same_clock)
		reg16 |= PCI_EXP_LNKCTL_CCC;
	else
		reg16 &= ~PCI_EXP_LNKCTL_CCC;
	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);

	if (pcie_retrain_link(link))
		return;

	/* Training failed. Restore common clock configurations */
	pci_err(parent, "ASPM: Could not configure common clock\n");
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
					   child_reg[PCI_FUNC(child->devfn)]);
	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
}

/* Convert L0s latency encoding to ns */
static u32 calc_l0s_latency(u32 lnkcap)
{
	u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;

	if (encoding == 0x7)
		return (5 * 1000);	/* > 4us */
	return (64 << encoding);
}

/* Convert L0s acceptable latency encoding to ns */
static u32 calc_l0s_acceptable(u32 encoding)
{
	if (encoding == 0x7)
		return -1U;
	return (64 << encoding);
}

/* Convert L1 latency encoding to ns */
static u32 calc_l1_latency(u32 lnkcap)
{
	u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;

	if (encoding == 0x7)
		return (65 * 1000);	/* > 64us */
	return (1000 << encoding);
}

/* Convert L1 acceptable latency encoding to ns */
static u32 calc_l1_acceptable(u32 encoding)
{
	if (encoding == 0x7)
		return -1U;
	return (1000 << encoding);
}

/* Convert L1SS T_pwr encoding to usec */
static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
{
	switch (scale) {
	case 0:
		return val * 2;
	case 1:
		return val * 10;
	case 2:
		return val * 100;
	}
	pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
	return 0;
}

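/*
 * The LTR_L1.2_THRESHOLD field is a value plus a scale, where the scale
 * selects units of 2^(5 * scale) ns; that is why each step below shifts
 * the threshold right by another 5 bits.
 */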
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
	u32 threshold_ns = threshold_us * 1000;

	/* See PCIe r3.1, sec 7.33.3 and sec 6.18 */
	if (threshold_ns < 32) {
		*scale = 0;
		*value = threshold_ns;
	} else if (threshold_ns < 1024) {
		*scale = 1;
		*value = threshold_ns >> 5;
	} else if (threshold_ns < 32768) {
		*scale = 2;
		*value = threshold_ns >> 10;
	} else if (threshold_ns < 1048576) {
		*scale = 3;
		*value = threshold_ns >> 15;
	} else if (threshold_ns < 33554432) {
		*scale = 4;
		*value = threshold_ns >> 20;
	} else {
		*scale = 5;
		*value = threshold_ns >> 25;
	}
}

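/*
 * Walk the link path from the endpoint toward the Root Complex, clearing
 * any aspm_capable bits whose exit latency exceeds what the endpoint can
 * tolerate.
 */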
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
	u32 latency, l1_switch_latency = 0;
	struct aspm_latency *acceptable;
	struct pcie_link_state *link;

	/* Device not in D0 doesn't need latency check */
	if ((endpoint->current_state != PCI_D0) &&
	    (endpoint->current_state != PCI_UNKNOWN))
		return;

	link = endpoint->bus->self->link_state;
	acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];

	while (link) {
		/* Check upstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
		    (link->latency_up.l0s > acceptable->l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_UP;

		/* Check downstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
		    (link->latency_dw.l0s > acceptable->l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_DW;
		/*
		 * Check L1 latency.
		 * Every switch on the path to the root complex needs 1
		 * more microsecond for L1.  The spec doesn't mention L0s.
		 *
		 * The exit latencies for L1 substates are not advertised
		 * by a device.  Since the spec also doesn't mention a way
		 * to determine max latencies introduced by enabling L1
		 * substates on the components, it is not clear how to do
		 * a L1 substate exit latency check.  We assume that the
		 * L1 exit latencies advertised by a device include L1
		 * substate latencies (and hence do not do any check).
		 */
		latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
		if ((link->aspm_capable & ASPM_STATE_L1) &&
		    (latency + l1_switch_latency > acceptable->l1))
			link->aspm_capable &= ~ASPM_STATE_L1;
		l1_switch_latency += 1000;

		link = link->parent;
	}
}

/*
 * The L1 PM substate capability is only implemented in function 0 in a
 * multi function device.
 */
static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
{
	struct pci_dev *child;

	list_for_each_entry(child, &linkbus->devices, bus_list)
		if (PCI_FUNC(child->devfn) == 0)
			return child;
	return NULL;
}

static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
				    u32 clear, u32 set)
{
	u32 val;

	pci_read_config_dword(pdev, pos, &val);
	val &= ~clear;
	val |= set;
	pci_write_config_dword(pdev, pos, val);
}

/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
				u32 parent_l1ss_cap, u32 child_l1ss_cap)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 val1, val2, scale1, scale2;
	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
	u32 ctl1 = 0, ctl2 = 0;
	u32 pctl1, pctl2, cctl1, cctl2;
	u32 pl1_2_enables, cl1_2_enables;

	if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
		return;

	/* Choose the greater of the two Port Common_Mode_Restore_Times */
	val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	t_common_mode = max(val1, val2);

	/* Choose the greater of the two Port T_POWER_ON times */
	val1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
	val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;

	if (calc_l1ss_pwron(parent, scale1, val1) >
	    calc_l1ss_pwron(child, scale2, val2)) {
		ctl2 |= scale1 | (val1 << 3);
		t_power_on = calc_l1ss_pwron(parent, scale1, val1);
	} else {
		ctl2 |= scale2 | (val2 << 3);
		t_power_on = calc_l1ss_pwron(child, scale2, val2);
	}

	/*
	 * Set LTR_L1.2_THRESHOLD to the time required to transition the
	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
	 * downstream devices report (via LTR) that they can tolerate at
	 * least that much latency.
	 *
	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
	 * Table 5-11.  T(POWER_OFF) is at most 2us and T(L1.2) is at
	 * least 4us.
	 */
	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
	encode_l12_threshold(l1_2_threshold, &scale, &value);
	ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;

	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);

	if (ctl1 == pctl1 && ctl1 == cctl1 &&
	    ctl2 == pctl2 && ctl2 == cctl2)
		return;

	/* Disable L1.2 while updating.  See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
	pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
	cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;

	if (pl1_2_enables || cl1_2_enables) {
		pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_L1_2_MASK, 0);
		pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_L1_2_MASK, 0);
	}

	/* Program T_POWER_ON times in both ports */
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
	pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);

	/* Program Common_Mode_Restore_Time in upstream device */
	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);

	/* Program LTR_L1.2_THRESHOLD time in both ports */
	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
	pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);

	if (pl1_2_enables || cl1_2_enables) {
		pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
					pl1_2_enables);
		pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
					cl1_2_enables);
	}
}

static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 parent_lnkcap, child_lnkcap;
	u16 parent_lnkctl, child_lnkctl;
	u32 parent_l1ss_cap, child_l1ss_cap;
	u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
	struct pci_bus *linkbus = parent->subordinate;

	if (blacklist) {
		/* Set enabled/disabled so that we will disable ASPM later */
		link->aspm_enabled = ASPM_STATE_ALL;
		link->aspm_disable = ASPM_STATE_ALL;
		return;
	}

	/*
	 * If ASPM not supported, don't mess with the clocks and link,
	 * bail out now.
	 */
	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
	if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
		return;

	/* Configure common clock before checking latencies */
	pcie_aspm_configure_common_clock(link);

	/*
	 * Re-read upstream/downstream components' register state after
	 * clock configuration.  L0s & L1 exit latencies in the otherwise
	 * read-only Link Capabilities may change depending on common clock
	 * configuration (PCIe r5.0, sec 7.5.3.6).
	 */
	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
	pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);

	/*
	 * Setup L0s state
	 *
	 * Note that we must not enable L0s in either direction on a
	 * given link unless components on both sides of the link each
	 * support L0s.
	 */
	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
		link->aspm_support |= ASPM_STATE_L0S;

	if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_UP;
	if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_DW;
	link->latency_up.l0s = calc_l0s_latency(parent_lnkcap);
	link->latency_dw.l0s = calc_l0s_latency(child_lnkcap);

	/* Setup L1 state */
	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
		link->aspm_support |= ASPM_STATE_L1;

	if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
		link->aspm_enabled |= ASPM_STATE_L1;
	link->latency_up.l1 = calc_l1_latency(parent_lnkcap);
	link->latency_dw.l1 = calc_l1_latency(child_lnkcap);

	/* Setup L1 substate */
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
			      &parent_l1ss_cap);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
			      &child_l1ss_cap);

	if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
		parent_l1ss_cap = 0;
	if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
		child_l1ss_cap = 0;

	/*
	 * If we don't have LTR for the entire path from the Root Complex
	 * to this device, we can't use ASPM L1.2 because it relies on the
	 * LTR_L1.2_THRESHOLD.  See PCIe r4.0, secs 5.5.4, 6.18.
	 */
	if (!child->ltr_path)
		child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;

	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2_PCIPM;

	if (parent_l1ss_cap)
		pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				      &parent_l1ss_ctl1);
	if (child_l1ss_cap)
		pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
				      &child_l1ss_ctl1);

	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;

	if (link->aspm_support & ASPM_STATE_L1SS)
		aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);

	/* Save default state */
	link->aspm_default = link->aspm_enabled;

	/* Setup initial capable state. Will be updated later */
	link->aspm_capable = link->aspm_support;

	/* Get and check endpoint acceptable latencies */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		u32 reg32, encoding;
		struct aspm_latency *acceptable =
			&link->acceptable[PCI_FUNC(child->devfn)];

		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
			continue;

		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		/* Calculate endpoint L0s acceptable latency */
		encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
		acceptable->l0s = calc_l0s_acceptable(encoding);
		/* Calculate endpoint L1 acceptable latency */
		encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
		acceptable->l1 = calc_l1_acceptable(encoding);

		pcie_aspm_check_latency(child);
	}
}

/* Configure the ASPM L1 substates */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
	u32 val, enable_req;
	struct pci_dev *child = link->downstream, *parent = link->pdev;

	enable_req = (link->aspm_enabled ^ state) & state;

	/*
	 * Here are the rules specified in the PCIe spec for enabling L1SS:
	 * - When enabling L1.x, enable bit at parent first, then at child
	 * - When disabling L1.x, disable bit at child first, then at parent
	 * - When enabling ASPM L1.x, need to disable L1
	 *   (at child followed by parent).
	 * - The ASPM/PCIPM L1.2 must be disabled while programming timing
	 *   parameters
	 *
	 * To keep it simple, disable all L1SS bits first, and later enable
	 * what is needed.
	 */

	/* Disable all L1 substates */
	pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	/*
	 * If needed, disable L1, and it gets enabled later
	 * in pcie_config_aspm_link().
	 */
	if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
	}

	val = 0;
	if (state & ASPM_STATE_L1_1)
		val |= PCI_L1SS_CTL1_ASPM_L1_1;
	if (state & ASPM_STATE_L1_2)
		val |= PCI_L1SS_CTL1_ASPM_L1_2;
	if (state & ASPM_STATE_L1_1_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
	if (state & ASPM_STATE_L1_2_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_2;

	/* Enable what we need to enable */
	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
	pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
}

static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, val);
}

static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
	u32 upstream = 0, dwstream = 0;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;

	/* Enable only the states that were not explicitly disabled */
	state &= (link->aspm_capable & ~link->aspm_disable);

	/* Can't enable any substates if L1 is not enabled */
	if (!(state & ASPM_STATE_L1))
		state &= ~ASPM_STATE_L1SS;

	/* Spec says both ports must be in D0 before enabling PCI PM substates */
	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
		state &= ~ASPM_STATE_L1_SS_PCIPM;
		state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
	}

	/* Nothing to do if the link is already in the requested state */
	if (link->aspm_enabled == state)
		return;
	/* Convert ASPM state to upstream/downstream ASPM register state */
	if (state & ASPM_STATE_L0S_UP)
		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L0S_DW)
		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L1) {
		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
	}

	if (link->aspm_capable & ASPM_STATE_L1SS)
		pcie_config_aspm_l1ss(link, state);

	/*
	 * Spec 2.0 suggests all functions should be configured with the
	 * same ASPM setting.  Enabling ASPM L1 should be done in the
	 * upstream component first and then the downstream, and vice
	 * versa for disabling ASPM L1.  The spec doesn't mention L0S.
	 */
	if (state & ASPM_STATE_L1)
		pcie_config_aspm_dev(parent, upstream);
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_config_aspm_dev(child, dwstream);
	if (!(state & ASPM_STATE_L1))
		pcie_config_aspm_dev(parent, upstream);

	link->aspm_enabled = state;
}

static void pcie_config_aspm_path(struct pcie_link_state *link)
{
	while (link) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		link = link->parent;
	}
}

static void free_link_state(struct pcie_link_state *link)
{
	link->pdev->link_state = NULL;
	kfree(link);
}

static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
	struct pci_dev *child;
	u32 reg32;

	/*
	 * Some functions in a slot might not all be PCIe functions,
	 * very strange. Disable ASPM for the whole slot
	 */
	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
		if (!pci_is_pcie(child))
			return -EINVAL;

		/*
		 * If ASPM is disabled then we're not going to change
		 * the BIOS state. It's safe to continue even if it's a
		 * pre-1.1 device
		 */

		if (aspm_disabled)
			continue;

		/*
		 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
		 * RBER bit to determine if a function is 1.1 version device
		 */
		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
			pci_info(child, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
			return -EINVAL;
		}
	}
	return 0;
}

static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	INIT_LIST_HEAD(&link->sibling);
	link->pdev = pdev;
	link->downstream = pci_function_0(pdev->subordinate);

	/*
	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
	 * hierarchies.  Note that some PCIe host implementations omit
	 * the root ports entirely, in which case a downstream port on
	 * a switch may become the root of the link state chain for all
	 * its subordinate endpoints.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
	    !pdev->bus->parent->self) {
		link->root = link;
	} else {
		struct pcie_link_state *parent;

		parent = pdev->bus->parent->self->link_state;
		if (!parent) {
			kfree(link);
			return NULL;
		}

		link->parent = parent;
		link->root = link->parent->root;
	}

	list_add(&link->sibling, &link_list);
	pdev->link_state = link;
	return link;
}

static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
{
	struct pci_dev *child;

	list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
		sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
}

/*
 * pcie_aspm_init_link_state: Initiate PCI express link state.
 * It is called after the pcie and its children devices are scanned.
 * @pdev: the root port or switch downstream port
 */
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;
	int blacklist = !!pcie_aspm_sanity_check(pdev);

	if (!aspm_support_enabled)
		return;

	if (pdev->link_state)
		return;

	/*
	 * We allocate pcie_link_state for the component on the upstream
	 * end of a Link, so there's nothing to do unless this device is
	 * a downstream port.
	 */
	if (!pcie_downstream_port(pdev))
		return;

	/* VIA has a strange chipset, root port is under a bridge */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
	    pdev->bus->self)
		return;

	down_read(&pci_bus_sem);
	if (list_empty(&pdev->subordinate->devices))
		goto out;

	mutex_lock(&aspm_lock);
	link = alloc_pcie_link_state(pdev);
	if (!link)
		goto unlock;
	/*
	 * Setup initial ASPM state.  Note that we need to configure
	 * upstream links also because their capable state can be
	 * updated through pcie_aspm_cap_init().
	 */
	pcie_aspm_cap_init(link, blacklist);

	/* Setup initial Clock PM state */
	pcie_clkpm_cap_init(link, blacklist);

	/*
	 * At this stage drivers haven't had an opportunity to change the
	 * link policy setting. Enabling ASPM on broken hardware can cripple
	 * it even before the driver has had a chance to disable ASPM, so
	 * default to a safe level right now. If we're enabling ASPM beyond
	 * the BIOS's expectation, we'll do so once pci_enable_device() is
	 * called.
	 */
	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE) {
		pcie_config_aspm_path(link);
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}

	pcie_aspm_update_sysfs_visibility(pdev);

unlock:
	mutex_unlock(&aspm_lock);
out:
	up_read(&pci_bus_sem);
}

/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
	struct pcie_link_state *link;
	BUG_ON(root->parent);
	list_for_each_entry(link, &link_list, sibling) {
		if (link->root != root)
			continue;
		link->aspm_capable = link->aspm_support;
	}
	list_for_each_entry(link, &link_list, sibling) {
		struct pci_dev *child;
		struct pci_bus *linkbus = link->pdev->subordinate;
		if (link->root != root)
			continue;
		list_for_each_entry(child, &linkbus->devices, bus_list) {
			if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
			    (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
				continue;
			pcie_aspm_check_latency(child);
		}
	}
}

/* @pdev: the endpoint device */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link, *root, *parent_link;

	if (!parent || !parent->link_state)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	/*
	 * All PCIe functions are in one slot; removing one function will
	 * remove the whole slot, so just wait until we are the last
	 * function left.
	 */
	if (!list_empty(&parent->subordinate->devices))
		goto out;

	link = parent->link_state;
	root = link->root;
	parent_link = link->parent;

	/* All functions are removed, so just disable ASPM for the link */
	pcie_config_aspm_link(link, 0);
	list_del(&link->sibling);
	/* Clock PM is for endpoint device */
	free_link_state(link);

	/* Recheck latencies and configure upstream links */
	if (parent_link) {
		pcie_update_aspm_capable(root);
		pcie_config_aspm_path(parent_link);
	}
out:
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

/* @pdev: the root port or switch downstream port */
void pcie_aspm_pm_state_change(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;
	/*
	 * Devices changed PM state; we should recheck if the latency
	 * meets all functions' requirements
	 */
	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_update_aspm_capable(link->root);
	pcie_config_aspm_path(link);
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;

	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_config_aspm_path(link);
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

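/*
 * Return the link_state owned by @pdev's upstream PCIe bridge, i.e. the
 * upstream end of the link leading to @pdev, or NULL if either @pdev or
 * its upstream bridge is not PCIe.
 */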
static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
{
	struct pci_dev *bridge;

	if (!pci_is_pcie(pdev))
		return NULL;

	bridge = pci_upstream_bridge(pdev);
	if (!bridge || !pci_is_pcie(bridge))
		return NULL;

	return bridge->link_state;
}

static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	if (!link)
		return -EINVAL;
	/*
	 * A driver requested that ASPM be disabled on this device, but
	 * if we don't have permission to manage ASPM (e.g., on ACPI
	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
	 * the _OSC method), we can't honor that request.  Windows has
	 * a similar mechanism using "PciASPMOptOut", which is also
	 * ignored in this situation.
	 */
	if (aspm_disabled) {
		pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
		return -EPERM;
	}

	if (sem)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	if (state & PCIE_LINK_STATE_L0S)
		link->aspm_disable |= ASPM_STATE_L0S;
	if (state & PCIE_LINK_STATE_L1)
		/* L1 PM substates require L1 */
		link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
	if (state & PCIE_LINK_STATE_L1_1)
		link->aspm_disable |= ASPM_STATE_L1_1;
	if (state & PCIE_LINK_STATE_L1_2)
		link->aspm_disable |= ASPM_STATE_L1_2;
	if (state & PCIE_LINK_STATE_L1_1_PCIPM)
		link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
	if (state & PCIE_LINK_STATE_L1_2_PCIPM)
		link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	if (state & PCIE_LINK_STATE_CLKPM)
		link->clkpm_disable = 1;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	if (sem)
		up_read(&pci_bus_sem);

	return 0;
}

int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
	return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);

/**
 * pci_disable_link_state - Disable device's link state, so the link will
 * never enter specific states.  Note that if the BIOS didn't grant ASPM
 * control to the OS, this does nothing because we can't touch the LNKCTL
 * register. Returns 0 or a negative errno.
 *
 * @pdev: PCI device
 * @state: ASPM link state to disable
 */
int pci_disable_link_state(struct pci_dev *pdev, int state)
{
	return __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state);

static int pcie_aspm_set_policy(const char *val,
				const struct kernel_param *kp)
{
	int i;
	struct pcie_link_state *link;

	if (aspm_disabled)
		return -EPERM;
	i = sysfs_match_string(policy_str, val);
	if (i < 0)
		return i;
	if (i == aspm_policy)
		return 0;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	aspm_policy = i;
	list_for_each_entry(link, &link_list, sibling) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
	return 0;
}

static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
{
	int i, cnt = 0;
	for (i = 0; i < ARRAY_SIZE(policy_str); i++)
		if (i == aspm_policy)
			cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
		else
			cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
	cnt += sprintf(buffer + cnt, "\n");
	return cnt;
}

module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
		  NULL, 0644);

/**
 * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
 * @pdev: Target device.
 *
 * Relies on the upstream bridge's link_state being valid.  The link_state
 * is deallocated only when the last child of the bridge (i.e., @pdev or a
 * sibling) is removed, and the caller should be holding a reference to
 * @pdev, so this should be safe.
 */
bool pcie_aspm_enabled(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	if (!link)
		return false;

	return link->aspm_enabled;
}
EXPORT_SYMBOL_GPL(pcie_aspm_enabled);

static ssize_t aspm_attr_show_common(struct device *dev,
				     struct device_attribute *attr,
				     char *buf, u8 state)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	return sprintf(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
}

static ssize_t aspm_attr_store_common(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t len, u8 state)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	bool state_enable;

	if (strtobool(buf, &state_enable) < 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	if (state_enable) {
		link->aspm_disable &= ~state;
		/* need to enable L1 for substates */
		if (state & ASPM_STATE_L1SS)
			link->aspm_disable &= ~ASPM_STATE_L1;
	} else {
		link->aspm_disable |= state;
	}

	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);

	return len;
}

#define ASPM_ATTR(_f, _s)						\
static ssize_t _f##_show(struct device *dev,				\
			 struct device_attribute *attr, char *buf)	\
{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); }	\
									\
static ssize_t _f##_store(struct device *dev,				\
			  struct device_attribute *attr,		\
			  const char *buf, size_t len)			\
{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }

ASPM_ATTR(l0s_aspm, L0S)
ASPM_ATTR(l1_aspm, L1)
ASPM_ATTR(l1_1_aspm, L1_1)
ASPM_ATTR(l1_2_aspm, L1_2)
ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)

static ssize_t clkpm_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	return sprintf(buf, "%d\n", link->clkpm_enabled);
}

static ssize_t clkpm_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	bool state_enable;

	if (strtobool(buf, &state_enable) < 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	link->clkpm_disable = !state_enable;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);

	return len;
}

static DEVICE_ATTR_RW(clkpm);
static DEVICE_ATTR_RW(l0s_aspm);
static DEVICE_ATTR_RW(l1_aspm);
static DEVICE_ATTR_RW(l1_1_aspm);
static DEVICE_ATTR_RW(l1_2_aspm);
static DEVICE_ATTR_RW(l1_1_pcipm);
static DEVICE_ATTR_RW(l1_2_pcipm);

static struct attribute *aspm_ctrl_attrs[] = {
	&dev_attr_clkpm.attr,
	&dev_attr_l0s_aspm.attr,
	&dev_attr_l1_aspm.attr,
	&dev_attr_l1_1_aspm.attr,
	&dev_attr_l1_2_aspm.attr,
	&dev_attr_l1_1_pcipm.attr,
	&dev_attr_l1_2_pcipm.attr,
	NULL
};

static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
					   struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	static const u8 aspm_state_map[] = {
		ASPM_STATE_L0S,
		ASPM_STATE_L1,
		ASPM_STATE_L1_1,
		ASPM_STATE_L1_2,
		ASPM_STATE_L1_1_PCIPM,
		ASPM_STATE_L1_2_PCIPM,
	};

	if (aspm_disabled || !link)
		return 0;

	if (n == 0)
		return link->clkpm_capable ? a->mode : 0;

	return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
}

const struct attribute_group aspm_ctrl_attr_group = {
	.name = "link",
	.attrs = aspm_ctrl_attrs,
	.is_visible = aspm_ctrl_attrs_are_visible,
};

static int __init pcie_aspm_disable(char *str)
{
	if (!strcmp(str, "off")) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
		aspm_support_enabled = false;
		printk(KERN_INFO "PCIe ASPM is disabled\n");
	} else if (!strcmp(str, "force")) {
		aspm_force = 1;
		printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
	}
	return 1;
}

__setup("pcie_aspm=", pcie_aspm_disable);

void pcie_no_aspm(void)
{
	/*
	 * Disabling ASPM is intended to prevent the kernel from modifying
	 * existing hardware state, not to clear existing state. To that end:
	 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
	 * (b) prevent userspace from changing policy
	 */
	if (!aspm_force) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
	}
}

bool pcie_aspm_support_enabled(void)
{
	return aspm_support_enabled;
}
EXPORT_SYMBOL(pcie_aspm_support_enabled);