blob: 88b996764ff95ab4abeb72c332ec20154a7130a0 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0+
2/*
3 * PCI Express PCI Hot Plug Driver
4 *
5 * Copyright (C) 1995,2001 Compaq Computer Corporation
6 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
7 * Copyright (C) 2001 IBM Corp.
8 * Copyright (C) 2003-2004 Intel Corporation
9 *
10 * All rights reserved.
11 *
12 * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
13 */
14
David Brazdil0f672f62019-12-10 10:32:29 +000015#define dev_fmt(fmt) "pciehp: " fmt
16
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000017#include <linux/kernel.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000018#include <linux/types.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000019#include <linux/jiffies.h>
20#include <linux/kthread.h>
21#include <linux/pci.h>
22#include <linux/pm_runtime.h>
23#include <linux/interrupt.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000024#include <linux/slab.h>
25
26#include "../pci.h"
27#include "pciehp.h"
28
29static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
30{
31 return ctrl->pcie->port;
32}
33
34static irqreturn_t pciehp_isr(int irq, void *dev_id);
35static irqreturn_t pciehp_ist(int irq, void *dev_id);
36static int pciehp_poll(void *data);
37
/*
 * Request the hotplug interrupt, or start the polling kthread when the
 * driver runs in pciehp_poll_mode.  Returns 0 on success or a negative
 * errno on failure.
 */
static inline int pciehp_request_irq(struct controller *ctrl)
{
	int retval, irq = ctrl->pcie->irq;

	if (pciehp_poll_mode) {
		/* No IRQ used: poll the slot from a dedicated kthread */
		ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
						"pciehp_poll-%s",
						slot_name(ctrl));
		return PTR_ERR_OR_ZERO(ctrl->poll_thread);
	}

	/* Installs the interrupt handler */
	retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
				      IRQF_SHARED, "pciehp", ctrl);
	if (retval)
		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
			 irq);
	return retval;
}
57
58static inline void pciehp_free_irq(struct controller *ctrl)
59{
60 if (pciehp_poll_mode)
61 kthread_stop(ctrl->poll_thread);
62 else
63 free_irq(ctrl->pcie->irq, ctrl);
64}
65
/*
 * Poll the Slot Status register until the Command Completed bit is set or
 * @timeout (milliseconds) expires.  Returns 1 if the command completed
 * (the CC bit is acknowledged/cleared here), 0 on timeout or when the
 * device stops responding.
 */
static int pcie_poll_cmd(struct controller *ctrl, int timeout)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_status;

	while (true) {
		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
		if (slot_status == (u16) ~0) {
			/* all-ones read: device likely hot-removed */
			ctrl_info(ctrl, "%s: no response from device\n",
				  __func__);
			return 0;
		}

		if (slot_status & PCI_EXP_SLTSTA_CC) {
			/* acknowledge completion (RW1C bit) */
			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
						   PCI_EXP_SLTSTA_CC);
			return 1;
		}
		if (timeout < 0)
			break;
		msleep(10);
		timeout -= 10;
	}
	return 0;	/* timeout */
}
91
/*
 * Wait for the most recently issued Slot Control command to complete,
 * either by sleeping until the Command Completed interrupt wakes us or by
 * polling, bounded by a timeout measured from ctrl->cmd_started.
 */
static void pcie_wait_cmd(struct controller *ctrl)
{
	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
	unsigned long duration = msecs_to_jiffies(msecs);
	unsigned long cmd_timeout = ctrl->cmd_started + duration;
	unsigned long now, timeout;
	int rc;

	/*
	 * If the controller does not generate notifications for command
	 * completions, we never need to wait between writes.
	 */
	if (NO_CMD_CMPL(ctrl))
		return;

	if (!ctrl->cmd_busy)
		return;

	/*
	 * Even if the command has already timed out, we want to call
	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
	 */
	now = jiffies;
	if (time_before_eq(cmd_timeout, now))
		timeout = 1;
	else
		timeout = cmd_timeout - now;

	/* Sleep on the queue only if completion interrupts are enabled */
	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
	else
		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));

	if (!rc)
		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
			  ctrl->slot_ctrl,
			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
131
/* Slot Control bits for which erratum-affected ports DO signal completion */
#define CC_ERRATUM_MASK (PCI_EXP_SLTCTL_PCC |	\
			 PCI_EXP_SLTCTL_PIC |	\
			 PCI_EXP_SLTCTL_AIC |	\
			 PCI_EXP_SLTCTL_EIC)

/*
 * Read-modify-write the Slot Control register under ctrl->ctrl_lock: only
 * the bits selected by @mask are replaced with the corresponding bits of
 * @cmd.  If @wait is true, also wait for the hardware to latch this
 * command before returning.
 */
static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
			      u16 mask, bool wait)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl_orig, slot_ctrl;

	mutex_lock(&ctrl->ctrl_lock);

	/*
	 * Always wait for any previous command that might still be in progress
	 */
	pcie_wait_cmd(ctrl);

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	if (slot_ctrl == (u16) ~0) {
		/* all-ones read: device no longer accessible */
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		goto out;
	}

	slot_ctrl_orig = slot_ctrl;
	slot_ctrl &= ~mask;
	slot_ctrl |= (cmd & mask);
	ctrl->cmd_busy = 1;
	smp_mb();	/* order cmd_busy store before the register write */
	ctrl->slot_ctrl = slot_ctrl;
	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
	ctrl->cmd_started = jiffies;

	/*
	 * Controllers with the Intel CF118 and similar errata advertise
	 * Command Completed support, but they only set Command Completed
	 * if we change the "Control" bits for power, power indicator,
	 * attention indicator, or interlock. If we only change the
	 * "Enable" bits, they never set the Command Completed bit.
	 */
	if (pdev->broken_cmd_compl &&
	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
		ctrl->cmd_busy = 0;

	/*
	 * Optionally wait for the hardware to be ready for a new command,
	 * indicating completion of the above issued command.
	 */
	if (wait)
		pcie_wait_cmd(ctrl);

out:
	mutex_unlock(&ctrl->ctrl_lock);
}
186
/**
 * pcie_write_cmd - Issue controller command
 * @ctrl: controller to which the command is issued
 * @cmd: command value written to slot control register
 * @mask: bitmask of slot control register to be modified
 *
 * Waits for the hardware to latch the command before returning.
 */
static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, true);
}
197
/* Same as pcie_write_cmd() but without waiting for the hardware to latch */
static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, false);
}
203
Olivier Deprez0e641232021-09-23 10:07:05 +0200204/**
205 * pciehp_check_link_active() - Is the link active
206 * @ctrl: PCIe hotplug controller
207 *
208 * Check whether the downstream link is currently active. Note it is
209 * possible that the card is removed immediately after this so the
210 * caller may need to take it into account.
211 *
212 * If the hotplug controller itself is not available anymore returns
213 * %-ENODEV.
214 */
215int pciehp_check_link_active(struct controller *ctrl)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000216{
217 struct pci_dev *pdev = ctrl_dev(ctrl);
218 u16 lnk_status;
Olivier Deprez0e641232021-09-23 10:07:05 +0200219 int ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000220
Olivier Deprez0e641232021-09-23 10:07:05 +0200221 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
222 if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0)
223 return -ENODEV;
224
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000225 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
Olivier Deprez0e641232021-09-23 10:07:05 +0200226 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000227
228 return ret;
229}
230
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000231static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
232{
233 u32 l;
234 int count = 0;
235 int delay = 1000, step = 20;
236 bool found = false;
237
238 do {
239 found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
240 count++;
241
242 if (found)
243 break;
244
245 msleep(step);
246 delay -= step;
247 } while (delay > 0);
248
David Brazdil0f672f62019-12-10 10:32:29 +0000249 if (count > 1)
250 pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000251 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
252 PCI_FUNC(devfn), count, step, l);
253
254 return found;
255}
256
/*
 * Wait for the link to come up after a hot-add and verify it trained
 * cleanly.  Returns 0 if the link is up, training succeeded and a device
 * answers at devfn 0; -1 otherwise.
 */
int pciehp_check_link_status(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	bool found;
	u16 lnk_status;

	if (!pcie_wait_for_link(pdev, true))
		return -1;

	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
				  PCI_DEVFN(0, 0));

	/* ignore link or presence changes up to this point */
	if (found)
		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
			   &ctrl->pending_events);

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
		ctrl_err(ctrl, "link training error: status %#06x\n",
			 lnk_status);
		return -1;
	}

	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);

	if (!found)
		return -1;

	return 0;
}
290
291static int __pciehp_link_set(struct controller *ctrl, bool enable)
292{
293 struct pci_dev *pdev = ctrl_dev(ctrl);
294 u16 lnk_ctrl;
295
296 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
297
298 if (enable)
299 lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
300 else
301 lnk_ctrl |= PCI_EXP_LNKCTL_LD;
302
303 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
304 ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
305 return 0;
306}
307
/* Bring up the link by clearing the Link Disable bit. */
static int pciehp_link_enable(struct controller *ctrl)
{
	return __pciehp_link_set(ctrl, true);
}
312
/*
 * Report the raw attention/power indicator field (Slot Control bits 9:6)
 * to user space.  Always returns 0.
 */
int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
				    u8 *status)
{
	struct controller *ctrl = to_ctrl(hotplug_slot);
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl;

	/* hold a config-space PM ref so the register read is valid */
	pci_config_pm_runtime_get(pdev);
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	pci_config_pm_runtime_put(pdev);
	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
	return 0;
}
326
David Brazdil0f672f62019-12-10 10:32:29 +0000327int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000328{
David Brazdil0f672f62019-12-10 10:32:29 +0000329 struct controller *ctrl = to_ctrl(hotplug_slot);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000330 struct pci_dev *pdev = ctrl_dev(ctrl);
331 u16 slot_ctrl;
332
333 pci_config_pm_runtime_get(pdev);
334 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
335 pci_config_pm_runtime_put(pdev);
336 ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
337 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
338
339 switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
340 case PCI_EXP_SLTCTL_ATTN_IND_ON:
341 *status = 1; /* On */
342 break;
343 case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
344 *status = 2; /* Blink */
345 break;
346 case PCI_EXP_SLTCTL_ATTN_IND_OFF:
347 *status = 0; /* Off */
348 break;
349 default:
350 *status = 0xFF;
351 break;
352 }
David Brazdil0f672f62019-12-10 10:32:29 +0000353
354 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000355}
356
David Brazdil0f672f62019-12-10 10:32:29 +0000357void pciehp_get_power_status(struct controller *ctrl, u8 *status)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000358{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000359 struct pci_dev *pdev = ctrl_dev(ctrl);
360 u16 slot_ctrl;
361
362 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
363 ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
364 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
365
366 switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
367 case PCI_EXP_SLTCTL_PWR_ON:
368 *status = 1; /* On */
369 break;
370 case PCI_EXP_SLTCTL_PWR_OFF:
371 *status = 0; /* Off */
372 break;
373 default:
374 *status = 0xFF;
375 break;
376 }
377}
378
David Brazdil0f672f62019-12-10 10:32:29 +0000379void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000380{
David Brazdil0f672f62019-12-10 10:32:29 +0000381 struct pci_dev *pdev = ctrl_dev(ctrl);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000382 u16 slot_status;
383
384 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
385 *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
386}
387
Olivier Deprez0e641232021-09-23 10:07:05 +0200388/**
389 * pciehp_card_present() - Is the card present
390 * @ctrl: PCIe hotplug controller
391 *
392 * Function checks whether the card is currently present in the slot and
393 * in that case returns true. Note it is possible that the card is
394 * removed immediately after the check so the caller may need to take
395 * this into account.
396 *
397 * It the hotplug controller itself is not available anymore returns
398 * %-ENODEV.
399 */
400int pciehp_card_present(struct controller *ctrl)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000401{
David Brazdil0f672f62019-12-10 10:32:29 +0000402 struct pci_dev *pdev = ctrl_dev(ctrl);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000403 u16 slot_status;
Olivier Deprez0e641232021-09-23 10:07:05 +0200404 int ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000405
Olivier Deprez0e641232021-09-23 10:07:05 +0200406 ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
407 if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0)
408 return -ENODEV;
409
410 return !!(slot_status & PCI_EXP_SLTSTA_PDS);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000411}
412
David Brazdil0f672f62019-12-10 10:32:29 +0000413/**
414 * pciehp_card_present_or_link_active() - whether given slot is occupied
415 * @ctrl: PCIe hotplug controller
416 *
417 * Unlike pciehp_card_present(), which determines presence solely from the
418 * Presence Detect State bit, this helper also returns true if the Link Active
419 * bit is set. This is a concession to broken hotplug ports which hardwire
420 * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
Olivier Deprez0e641232021-09-23 10:07:05 +0200421 *
422 * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
423 * port is not present anymore returns %-ENODEV.
David Brazdil0f672f62019-12-10 10:32:29 +0000424 */
Olivier Deprez0e641232021-09-23 10:07:05 +0200425int pciehp_card_present_or_link_active(struct controller *ctrl)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000426{
Olivier Deprez0e641232021-09-23 10:07:05 +0200427 int ret;
428
429 ret = pciehp_card_present(ctrl);
430 if (ret)
431 return ret;
432
433 return pciehp_check_link_active(ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +0000434}
435
436int pciehp_query_power_fault(struct controller *ctrl)
437{
438 struct pci_dev *pdev = ctrl_dev(ctrl);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000439 u16 slot_status;
440
441 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
442 return !!(slot_status & PCI_EXP_SLTSTA_PFD);
443}
444
/*
 * Write raw attention/power indicator bits from user space; @status is
 * shifted into Slot Control bits 9:6.  Always returns 0.
 */
int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
				    u8 status)
{
	struct controller *ctrl = to_ctrl(hotplug_slot);
	struct pci_dev *pdev = ctrl_dev(ctrl);

	/* hold a config-space PM ref for the duration of the write */
	pci_config_pm_runtime_get(pdev);
	pcie_write_cmd_nowait(ctrl, status << 6,
			      PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
	pci_config_pm_runtime_put(pdev);
	return 0;
}
457
David Brazdil0f672f62019-12-10 10:32:29 +0000458/**
459 * pciehp_set_indicators() - set attention indicator, power indicator, or both
460 * @ctrl: PCIe hotplug controller
461 * @pwr: one of:
462 * PCI_EXP_SLTCTL_PWR_IND_ON
463 * PCI_EXP_SLTCTL_PWR_IND_BLINK
464 * PCI_EXP_SLTCTL_PWR_IND_OFF
465 * @attn: one of:
466 * PCI_EXP_SLTCTL_ATTN_IND_ON
467 * PCI_EXP_SLTCTL_ATTN_IND_BLINK
468 * PCI_EXP_SLTCTL_ATTN_IND_OFF
469 *
470 * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator
471 * unchanged.
472 */
473void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000474{
David Brazdil0f672f62019-12-10 10:32:29 +0000475 u16 cmd = 0, mask = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000476
David Brazdil0f672f62019-12-10 10:32:29 +0000477 if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) {
478 cmd |= (pwr & PCI_EXP_SLTCTL_PIC);
479 mask |= PCI_EXP_SLTCTL_PIC;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000480 }
David Brazdil0f672f62019-12-10 10:32:29 +0000481
482 if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) {
483 cmd |= (attn & PCI_EXP_SLTCTL_AIC);
484 mask |= PCI_EXP_SLTCTL_AIC;
485 }
486
487 if (cmd) {
488 pcie_write_cmd_nowait(ctrl, cmd, mask);
489 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
490 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
491 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000492}
493
David Brazdil0f672f62019-12-10 10:32:29 +0000494int pciehp_power_on_slot(struct controller *ctrl)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000495{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000496 struct pci_dev *pdev = ctrl_dev(ctrl);
497 u16 slot_status;
498 int retval;
499
500 /* Clear power-fault bit from previous power failures */
501 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
502 if (slot_status & PCI_EXP_SLTSTA_PFD)
503 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
504 PCI_EXP_SLTSTA_PFD);
505 ctrl->power_fault_detected = 0;
506
507 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
508 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
509 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
510 PCI_EXP_SLTCTL_PWR_ON);
511
512 retval = pciehp_link_enable(ctrl);
513 if (retval)
514 ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
515
516 return retval;
517}
518
David Brazdil0f672f62019-12-10 10:32:29 +0000519void pciehp_power_off_slot(struct controller *ctrl)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000520{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000521 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
522 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
523 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
524 PCI_EXP_SLTCTL_PWR_OFF);
525}
526
/*
 * Hardirq handler: collect hotplug events from Slot Status, acknowledge
 * them (RW1C), handle Command Completed inline and defer everything else
 * to the IRQ thread (pciehp_ist) via ctrl->pending_events.
 */
static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	struct device *parent = pdev->dev.parent;
	u16 status, events = 0;

	/*
	 * Interrupts only occur in D3hot or shallower and only if enabled
	 * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
	 */
	if (pdev->current_state == PCI_D3cold ||
	    (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
		return IRQ_NONE;

	/*
	 * Keep the port accessible by holding a runtime PM ref on its parent.
	 * Defer resume of the parent to the IRQ thread if it's suspended.
	 * Mask the interrupt until then.
	 */
	if (parent) {
		pm_runtime_get_noresume(parent);
		if (!pm_runtime_active(parent)) {
			pm_runtime_put(parent);
			disable_irq_nosync(irq);
			atomic_or(RERUN_ISR, &ctrl->pending_events);
			return IRQ_WAKE_THREAD;
		}
	}

read_status:
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
	if (status == (u16) ~0) {
		/* all-ones read: port is gone */
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}

	/*
	 * Slot Status contains plain status bits as well as event
	 * notification bits; right now we only want the event bits.
	 */
	status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
		  PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
		  PCI_EXP_SLTSTA_DLLSC;

	/*
	 * If we've already reported a power fault, don't report it again
	 * until we've done something to handle it.
	 */
	if (ctrl->power_fault_detected)
		status &= ~PCI_EXP_SLTSTA_PFD;

	/* accumulate across the possible MSI re-read loop below */
	events |= status;
	if (!events) {
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}

	if (status) {
		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);

		/*
		 * In MSI mode, all event bits must be zero before the port
		 * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
		 * So re-read the Slot Status register in case a bit was set
		 * between read and write.
		 */
		if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
			goto read_status;
	}

	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
	if (parent)
		pm_runtime_put(parent);

	/*
	 * Command Completed notifications are not deferred to the
	 * IRQ thread because it may be waiting for their arrival.
	 */
	if (events & PCI_EXP_SLTSTA_CC) {
		ctrl->cmd_busy = 0;
		smp_mb();	/* order cmd_busy clear before wake_up() */
		wake_up(&ctrl->queue);

		if (events == PCI_EXP_SLTSTA_CC)
			return IRQ_HANDLED;

		events &= ~PCI_EXP_SLTSTA_CC;
	}

	if (pdev->ignore_hotplug) {
		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
		return IRQ_HANDLED;
	}

	/* Save pending events for consumption by IRQ thread. */
	atomic_or(events, &ctrl->pending_events);
	return IRQ_WAKE_THREAD;
}
629
/*
 * IRQ thread: consume the events queued by pciehp_isr() and dispatch them
 * (button press, power fault, disable request, presence/link change).
 */
static irqreturn_t pciehp_ist(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	irqreturn_t ret;
	u32 events;

	ctrl->ist_running = true;
	pci_config_pm_runtime_get(pdev);

	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
		ret = pciehp_isr(irq, dev_id);
		enable_irq(irq);
		if (ret != IRQ_WAKE_THREAD)
			goto out;
	}

	/* make sure a concurrent hardirq has finished queueing events */
	synchronize_hardirq(irq);
	events = atomic_xchg(&ctrl->pending_events, 0);
	if (!events) {
		ret = IRQ_NONE;
		goto out;
	}

	/* Check Attention Button Pressed */
	if (events & PCI_EXP_SLTSTA_ABP) {
		ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
			  slot_name(ctrl));
		pciehp_handle_button_press(ctrl);
	}

	/* Check Power Fault Detected */
	if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
		ctrl->power_fault_detected = 1;
		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
		pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
				      PCI_EXP_SLTCTL_ATTN_IND_ON);
	}

	/*
	 * Disable requests have higher priority than Presence Detect Changed
	 * or Data Link Layer State Changed events.
	 */
	down_read(&ctrl->reset_lock);
	if (events & DISABLE_SLOT)
		pciehp_handle_disable_request(ctrl);
	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
		pciehp_handle_presence_or_link_change(ctrl, events);
	up_read(&ctrl->reset_lock);

	ret = IRQ_HANDLED;
out:
	pci_config_pm_runtime_put(pdev);
	ctrl->ist_running = false;
	wake_up(&ctrl->requester);
	return ret;
}
688
/*
 * Poll kthread used instead of interrupts when pciehp_poll_mode is set.
 * Checks for slot events every pciehp_poll_time seconds (forced to 2 if
 * the module parameter is out of the 1..60 range).
 */
static int pciehp_poll(void *data)
{
	struct controller *ctrl = data;

	schedule_timeout_idle(10 * HZ);	/* start with 10 sec delay */

	while (!kthread_should_stop()) {
		/* poll for interrupt events or user requests */
		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
		       atomic_read(&ctrl->pending_events))
			pciehp_ist(IRQ_NOTCONNECTED, ctrl);

		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
			pciehp_poll_time = 2;	/* clamp to sane value */

		schedule_timeout_idle(pciehp_poll_time * HZ);
	}

	return 0;
}
709
/*
 * Enable the slot-event notifications this driver relies on (link state,
 * button or presence detect, and completion/interrupt enables).
 */
static void pcie_enable_notification(struct controller *ctrl)
{
	u16 cmd, mask;

	/*
	 * TBD: Power fault detected software notification support.
	 *
	 * Power fault detected software notification is not enabled
	 * now, because it caused power fault detected interrupt storm
	 * on some machines. On those machines, power fault detected
	 * bit in the slot status register was set again immediately
	 * when it is cleared in the interrupt service routine, and
	 * next power fault detected interrupt was notified again.
	 */

	/*
	 * Always enable link events: thus link-up and link-down shall
	 * always be treated as hotplug and unplug respectively. Enable
	 * presence detect only if Attention Button is not present.
	 */
	cmd = PCI_EXP_SLTCTL_DLLSCE;
	if (ATTN_BUTTN(ctrl))
		cmd |= PCI_EXP_SLTCTL_ABPE;
	else
		cmd |= PCI_EXP_SLTCTL_PDCE;
	if (!pciehp_poll_mode)
		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;

	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
		PCI_EXP_SLTCTL_PFDE |
		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
		PCI_EXP_SLTCTL_DLLSCE);

	pcie_write_cmd_nowait(ctrl, cmd, mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
747
748static void pcie_disable_notification(struct controller *ctrl)
749{
750 u16 mask;
751
752 mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
753 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
754 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
755 PCI_EXP_SLTCTL_DLLSCE);
756 pcie_write_cmd(ctrl, 0, mask);
757 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
758 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
759}
760
/* Clear stale Presence Detect and Data Link Layer State Changed events. */
void pcie_clear_hotplug_events(struct controller *ctrl)
{
	pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
}
766
David Brazdil0f672f62019-12-10 10:32:29 +0000767void pcie_enable_interrupt(struct controller *ctrl)
768{
769 u16 mask;
770
771 mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
772 pcie_write_cmd(ctrl, mask, mask);
773}
774
/* Mask hot-plug interrupts before suspending the port. */
void pcie_disable_interrupt(struct controller *ctrl)
{
	u16 mask;

	/*
	 * Mask hot-plug interrupt to prevent it triggering immediately
	 * when the link goes inactive (we still get PME when any of the
	 * enabled events is detected). Same goes with Link Layer State
	 * changed event which generates PME immediately when the link goes
	 * inactive so mask it as well.
	 */
	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
	pcie_write_cmd(ctrl, 0, mask);
}
789
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000790/*
791 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
792 * bus reset of the bridge, but at the same time we want to ensure that it is
793 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
794 * disable link state notification and presence detection change notification
795 * momentarily, if we see that they could interfere. Also, clear any spurious
796 * events after.
797 */
David Brazdil0f672f62019-12-10 10:32:29 +0000798int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000799{
David Brazdil0f672f62019-12-10 10:32:29 +0000800 struct controller *ctrl = to_ctrl(hotplug_slot);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000801 struct pci_dev *pdev = ctrl_dev(ctrl);
802 u16 stat_mask = 0, ctrl_mask = 0;
803 int rc;
804
805 if (probe)
806 return 0;
807
808 down_write(&ctrl->reset_lock);
809
810 if (!ATTN_BUTTN(ctrl)) {
811 ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
812 stat_mask |= PCI_EXP_SLTSTA_PDC;
813 }
814 ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
815 stat_mask |= PCI_EXP_SLTSTA_DLLSC;
816
817 pcie_write_cmd(ctrl, 0, ctrl_mask);
818 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
819 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
820
821 rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
822
823 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
824 pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
825 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
826 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
827
828 up_write(&ctrl->reset_lock);
829 return rc;
830}
831
832int pcie_init_notification(struct controller *ctrl)
833{
834 if (pciehp_request_irq(ctrl))
835 return -1;
836 pcie_enable_notification(ctrl);
837 ctrl->notification_enabled = 1;
838 return 0;
839}
840
841void pcie_shutdown_notification(struct controller *ctrl)
842{
843 if (ctrl->notification_enabled) {
844 pcie_disable_notification(ctrl);
845 pciehp_free_irq(ctrl);
846 ctrl->notification_enabled = 0;
847 }
848}
849
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000850static inline void dbg_ctrl(struct controller *ctrl)
851{
852 struct pci_dev *pdev = ctrl->pcie->port;
853 u16 reg16;
854
David Brazdil0f672f62019-12-10 10:32:29 +0000855 ctrl_dbg(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000856 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
David Brazdil0f672f62019-12-10 10:32:29 +0000857 ctrl_dbg(ctrl, "Slot Status : 0x%04x\n", reg16);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000858 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
David Brazdil0f672f62019-12-10 10:32:29 +0000859 ctrl_dbg(ctrl, "Slot Control : 0x%04x\n", reg16);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000860}
861
862#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
863
/*
 * Allocate and initialize a hotplug controller for PCIe port @dev: read
 * and fix up the slot capabilities, initialize locks/waitqueues, derive
 * the initial slot state, clear stale events and, if the slot is empty
 * but powered, switch it off.  Returns the controller or NULL on OOM.
 */
struct controller *pcie_init(struct pcie_device *dev)
{
	struct controller *ctrl;
	u32 slot_cap, link_cap;
	u8 poweron;
	struct pci_dev *pdev = dev->port;
	struct pci_bus *subordinate = pdev->subordinate;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return NULL;

	ctrl->pcie = dev;
	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);

	/* user space owns the LEDs: pretend the slot has none */
	if (pdev->hotplug_user_indicators)
		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);

	/*
	 * We assume no Thunderbolt controllers support Command Complete events,
	 * but some controllers falsely claim they do.
	 */
	if (pdev->is_thunderbolt)
		slot_cap |= PCI_EXP_SLTCAP_NCCS;

	ctrl->slot_cap = slot_cap;
	mutex_init(&ctrl->ctrl_lock);
	mutex_init(&ctrl->state_lock);
	init_rwsem(&ctrl->reset_lock);
	init_waitqueue_head(&ctrl->requester);
	init_waitqueue_head(&ctrl->queue);
	INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
	dbg_ctrl(ctrl);

	/* initial state mirrors whether devices already sit on the bus */
	down_read(&pci_bus_sem);
	ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
	up_read(&pci_bus_sem);

	/* Check if Data Link Layer Link Active Reporting is implemented */
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);

	/* Clear all remaining event bits in Slot Status register. */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);

	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");

	/*
	 * If empty slot's power status is on, turn power off. The IRQ isn't
	 * requested yet, so avoid triggering a notification with this command.
	 */
	if (POWER_CTRL(ctrl)) {
		pciehp_get_power_status(ctrl, &poweron);
		if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
			pcie_disable_notification(ctrl);
			pciehp_power_off_slot(ctrl);
		}
	}

	return ctrl;
}
939
/* Free a controller allocated by pcie_init(); flush pending button work. */
void pciehp_release_ctrl(struct controller *ctrl)
{
	cancel_delayed_work_sync(&ctrl->button_work);
	kfree(ctrl);
}
945
/*
 * Quirk: some Intel/Qualcomm/HXT hotplug ports advertise Command Completed
 * support (NCCS clear) yet never signal it for "Enable"-bit-only writes.
 * Flag them so pcie_do_write_cmd() can avoid pointless completion timeouts.
 */
static void quirk_cmd_compl(struct pci_dev *pdev)
{
	u32 slot_cap;

	if (pci_is_pcie(pdev)) {
		pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
		if (slot_cap & PCI_EXP_SLTCAP_HPC &&
		    !(slot_cap & PCI_EXP_SLTCAP_NCCS))
			pdev->broken_cmd_compl = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);