// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/interrupt.h>

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_3, MEI_ME_PCH8_ITOUCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_3, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_ITOUCH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);

#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	return pci_read_config_dword(pdev, where, val);
}

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if the ME interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
			       const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg;
	struct mei_device *dev;
	struct mei_me_hw *hw;
	unsigned int irqflags;
	int err;

	cfg = mei_me_get_cfg(ent->driver_data);
	if (!cfg)
		return -ENODEV;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions and mapping IO device memory for mei driver */
	err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

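	/* prefer a 64-bit DMA mask, fall back to 32-bit if it cannot be set */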
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto end;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(&pdev->dev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_me_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev)[0];
	hw->read_fws = mei_me_read_fws;

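	/*
	 * MSI is optional: if it cannot be enabled the legacy (shared)
	 * interrupt line is used instead.
	 */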
	pci_enable_msi(pdev);

	hw->irq = pdev->irq;

	/* request and enable interrupt */
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * The MEI device needs to be resumed from runtime suspend
	 * in order to perform the link reset flow upon system suspend.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	/*
	 * ME maps runtime suspend/resume to D0i states,
	 * hence we need to go around the native PCI runtime service, which
	 * eventually brings the device into D3cold/hot state,
	 * but the mei device cannot wake up from D3 unlike from D0i3.
	 * To get around the PCI device native runtime pm,
	 * ME uses runtime pm domain handlers which take precedence
	 * over the driver's pm handlers.
	 */
	mei_me_set_pm_domain(dev);

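	/*
	 * When power gating is supported, release a runtime PM usage
	 * reference so the device may runtime suspend; with D0i3 support
	 * also allow runtime PM without waiting for userspace.
	 */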
	if (mei_pg_is_enabled(dev)) {
		pm_runtime_put_noidle(&pdev->dev);
		if (hw->d0i3_supported)
			pm_runtime_allow(&pdev->dev);
	}

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_shutdown is called from the reboot notifier;
 * it's a simplified version of remove so we go down
 * faster.
 */
static void mei_me_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);

	mei_deregister(dev);
}


#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	unsigned int irqflags;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	/* request and enable interrupt */
	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */


#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct mei_device *dev;

	dev_dbg(device, "rpm: me: runtime_idle\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;
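	/*
	 * When the write queue is idle request an autosuspend; returning
	 * -EBUSY tells the PM core not to carry out a suspend from the
	 * idle path itself.
	 */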
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: me: runtime suspend\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_me_pg_enter_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret);

	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: me: runtime resume\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_exit_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret);

	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_shutdown,
	.driver.pm = MEI_ME_PM_OPS,
	.driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");