// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);

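/*
 * The PM domain setup helpers are defined under CONFIG_PM below; without
 * power-management support they collapse to empty stubs so the probe,
 * shutdown and remove paths need no additional #ifdefs.
 */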
#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if ME Interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
			       const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg;
	struct mei_device *dev;
	struct mei_me_hw *hw;
	unsigned int irqflags;
	int err;

	cfg = mei_me_get_cfg(ent->driver_data);
	if (!cfg)
		return -ENODEV;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions and mapping IO device memory for mei driver */
	err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

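	/* prefer a 64-bit DMA mask; fall back to 32-bit if the platform rejects it */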
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto end;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_me_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev)[0];

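	/* try MSI first; if it is unavailable the legacy interrupt line is used */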
	pci_enable_msi(pdev);

	/* request and enable interrupt */
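	/*
	 * MSI vectors are never shared, so IRQF_ONESHOT (keep the vector
	 * masked until the threaded handler finishes) is sufficient; a
	 * legacy INTx line may be shared and needs IRQF_SHARED instead.
	 */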
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * MEI needs to be resumed from runtime suspend mode
	 * in order to perform the link reset flow upon system suspend.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);

	/*
	 * ME maps runtime suspend/resume to D0i states,
	 * hence we need to go around the native PCI runtime service, which
	 * eventually brings the device into D3cold/hot state,
	 * because the mei device cannot wake up from D3, unlike from D0i3.
	 * To get around native PCI runtime PM, ME uses runtime PM domain
	 * handlers, which take precedence over the driver's PM handlers.
	 */
	mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev)) {
		pm_runtime_put_noidle(&pdev->dev);
		if (hw->d0i3_supported)
			pm_runtime_allow(&pdev->dev);
	}

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

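	/* error unwind: release resources in reverse order of acquisition */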
stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_shutdown is called from the reboot notifier;
 * it is a simplified version of remove so that we go down
 * faster.
 */
static void mei_me_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);

	mei_deregister(dev);
}

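/*
 * System sleep: the interrupt and MSI are torn down on suspend and
 * re-requested on resume, followed by a full hardware restart via
 * mei_restart().
 */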
#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	unsigned int irqflags;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	/* request and enable interrupt */
	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
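/*
 * Runtime idle: kick autosuspend only when no client write is pending;
 * returning -EBUSY prevents the PM core from suspending the device
 * directly from the idle path.
 */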
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct mei_device *dev;

	dev_dbg(device, "rpm: me: runtime_idle\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

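/*
 * Runtime suspend: enter power gating only when the write queue is idle;
 * -EAGAIN leaves the device active so that suspend can be retried later,
 * any other failure schedules a full device reset.
 */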
static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: me: runtime suspend\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_me_pg_enter_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret);

	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: me: runtime resume\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_exit_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret);

	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

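		/*
		 * Override only the runtime PM callbacks; system sleep
		 * still goes through the copied bus PM ops.
		 */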
		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure
 */
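/* asynchronous probing is preferred since device start-up can take a while */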
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_shutdown,
	.driver.pm = MEI_ME_PM_OPS,
	.driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");