1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Core SoC Power Management Controller Driver
4  *
5  * Copyright (c) 2016, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
9  *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
10  */
11 
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 
14 #include <linux/acpi.h>
15 #include <linux/bitfield.h>
16 #include <linux/debugfs.h>
17 #include <linux/delay.h>
18 #include <linux/dmi.h>
19 #include <linux/io.h>
20 #include <linux/module.h>
21 #include <linux/pci.h>
22 #include <linux/platform_device.h>
23 #include <linux/slab.h>
24 #include <linux/suspend.h>
25 #include <linux/uaccess.h>
26 #include <linux/uuid.h>
27 
28 #include <acpi/acpi_bus.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/intel-family.h>
31 #include <asm/msr.h>
32 #include <asm/tsc.h>
33 
34 #include "core.h"
35 
36 #define ACPI_S0IX_DSM_UUID		"57a6512e-3979-4e9d-9708-ff13b2508972"
37 #define ACPI_GET_LOW_MODE_REGISTERS	1
38 
39 /* PKGC MSRs are common across Intel Core SoCs */
40 static const struct pmc_bit_map msr_map[] = {
41 	{"Package C2",                  MSR_PKG_C2_RESIDENCY},
42 	{"Package C3",                  MSR_PKG_C3_RESIDENCY},
43 	{"Package C6",                  MSR_PKG_C6_RESIDENCY},
44 	{"Package C7",                  MSR_PKG_C7_RESIDENCY},
45 	{"Package C8",                  MSR_PKG_C8_RESIDENCY},
46 	{"Package C9",                  MSR_PKG_C9_RESIDENCY},
47 	{"Package C10",                 MSR_PKG_C10_RESIDENCY},
48 	{}
49 };
50 
51 static const struct pmc_bit_map spt_pll_map[] = {
52 	{"MIPI PLL",			SPT_PMC_BIT_MPHY_CMN_LANE0},
53 	{"GEN2 USB2PCIE2 PLL",		SPT_PMC_BIT_MPHY_CMN_LANE1},
54 	{"DMIPCIE3 PLL",		SPT_PMC_BIT_MPHY_CMN_LANE2},
55 	{"SATA PLL",			SPT_PMC_BIT_MPHY_CMN_LANE3},
56 	{}
57 };
58 
59 static const struct pmc_bit_map spt_mphy_map[] = {
60 	{"MPHY CORE LANE 0",           SPT_PMC_BIT_MPHY_LANE0},
61 	{"MPHY CORE LANE 1",           SPT_PMC_BIT_MPHY_LANE1},
62 	{"MPHY CORE LANE 2",           SPT_PMC_BIT_MPHY_LANE2},
63 	{"MPHY CORE LANE 3",           SPT_PMC_BIT_MPHY_LANE3},
64 	{"MPHY CORE LANE 4",           SPT_PMC_BIT_MPHY_LANE4},
65 	{"MPHY CORE LANE 5",           SPT_PMC_BIT_MPHY_LANE5},
66 	{"MPHY CORE LANE 6",           SPT_PMC_BIT_MPHY_LANE6},
67 	{"MPHY CORE LANE 7",           SPT_PMC_BIT_MPHY_LANE7},
68 	{"MPHY CORE LANE 8",           SPT_PMC_BIT_MPHY_LANE8},
69 	{"MPHY CORE LANE 9",           SPT_PMC_BIT_MPHY_LANE9},
70 	{"MPHY CORE LANE 10",          SPT_PMC_BIT_MPHY_LANE10},
71 	{"MPHY CORE LANE 11",          SPT_PMC_BIT_MPHY_LANE11},
72 	{"MPHY CORE LANE 12",          SPT_PMC_BIT_MPHY_LANE12},
73 	{"MPHY CORE LANE 13",          SPT_PMC_BIT_MPHY_LANE13},
74 	{"MPHY CORE LANE 14",          SPT_PMC_BIT_MPHY_LANE14},
75 	{"MPHY CORE LANE 15",          SPT_PMC_BIT_MPHY_LANE15},
76 	{}
77 };
78 
79 static const struct pmc_bit_map spt_pfear_map[] = {
80 	{"PMC",				SPT_PMC_BIT_PMC},
81 	{"OPI-DMI",			SPT_PMC_BIT_OPI},
82 	{"SPI / eSPI",			SPT_PMC_BIT_SPI},
83 	{"XHCI",			SPT_PMC_BIT_XHCI},
84 	{"SPA",				SPT_PMC_BIT_SPA},
85 	{"SPB",				SPT_PMC_BIT_SPB},
86 	{"SPC",				SPT_PMC_BIT_SPC},
87 	{"GBE",				SPT_PMC_BIT_GBE},
88 	{"SATA",			SPT_PMC_BIT_SATA},
89 	{"HDA-PGD0",			SPT_PMC_BIT_HDA_PGD0},
90 	{"HDA-PGD1",			SPT_PMC_BIT_HDA_PGD1},
91 	{"HDA-PGD2",			SPT_PMC_BIT_HDA_PGD2},
92 	{"HDA-PGD3",			SPT_PMC_BIT_HDA_PGD3},
93 	{"RSVD",			SPT_PMC_BIT_RSVD_0B},
94 	{"LPSS",			SPT_PMC_BIT_LPSS},
95 	{"LPC",				SPT_PMC_BIT_LPC},
96 	{"SMB",				SPT_PMC_BIT_SMB},
97 	{"ISH",				SPT_PMC_BIT_ISH},
98 	{"P2SB",			SPT_PMC_BIT_P2SB},
99 	{"DFX",				SPT_PMC_BIT_DFX},
100 	{"SCC",				SPT_PMC_BIT_SCC},
101 	{"RSVD",			SPT_PMC_BIT_RSVD_0C},
102 	{"FUSE",			SPT_PMC_BIT_FUSE},
103 	{"CAMERA",			SPT_PMC_BIT_CAMREA},
104 	{"RSVD",			SPT_PMC_BIT_RSVD_0D},
105 	{"USB3-OTG",			SPT_PMC_BIT_USB3_OTG},
106 	{"EXI",				SPT_PMC_BIT_EXI},
107 	{"CSE",				SPT_PMC_BIT_CSE},
108 	{"CSME_KVM",			SPT_PMC_BIT_CSME_KVM},
109 	{"CSME_PMT",			SPT_PMC_BIT_CSME_PMT},
110 	{"CSME_CLINK",			SPT_PMC_BIT_CSME_CLINK},
111 	{"CSME_PTIO",			SPT_PMC_BIT_CSME_PTIO},
112 	{"CSME_USBR",			SPT_PMC_BIT_CSME_USBR},
113 	{"CSME_SUSRAM",			SPT_PMC_BIT_CSME_SUSRAM},
114 	{"CSME_SMT",			SPT_PMC_BIT_CSME_SMT},
115 	{"RSVD",			SPT_PMC_BIT_RSVD_1A},
116 	{"CSME_SMS2",			SPT_PMC_BIT_CSME_SMS2},
117 	{"CSME_SMS1",			SPT_PMC_BIT_CSME_SMS1},
118 	{"CSME_RTC",			SPT_PMC_BIT_CSME_RTC},
119 	{"CSME_PSF",			SPT_PMC_BIT_CSME_PSF},
120 	{}
121 };
122 
123 static const struct pmc_bit_map *ext_spt_pfear_map[] = {
124 	/*
125 	 * Check intel_pmc_core_ids[] users of spt_reg_map for
126 	 * a list of core SoCs using this.
127 	 */
128 	spt_pfear_map,
129 	NULL
130 };
131 
132 static const struct pmc_bit_map spt_ltr_show_map[] = {
133 	{"SOUTHPORT_A",		SPT_PMC_LTR_SPA},
134 	{"SOUTHPORT_B",		SPT_PMC_LTR_SPB},
135 	{"SATA",		SPT_PMC_LTR_SATA},
136 	{"GIGABIT_ETHERNET",	SPT_PMC_LTR_GBE},
137 	{"XHCI",		SPT_PMC_LTR_XHCI},
138 	{"Reserved",		SPT_PMC_LTR_RESERVED},
139 	{"ME",			SPT_PMC_LTR_ME},
140 	/* EVA is Enterprise Value Add, doesn't really exist on PCH */
141 	{"EVA",			SPT_PMC_LTR_EVA},
142 	{"SOUTHPORT_C",		SPT_PMC_LTR_SPC},
143 	{"HD_AUDIO",		SPT_PMC_LTR_AZ},
144 	{"LPSS",		SPT_PMC_LTR_LPSS},
145 	{"SOUTHPORT_D",		SPT_PMC_LTR_SPD},
146 	{"SOUTHPORT_E",		SPT_PMC_LTR_SPE},
147 	{"CAMERA",		SPT_PMC_LTR_CAM},
148 	{"ESPI",		SPT_PMC_LTR_ESPI},
149 	{"SCC",			SPT_PMC_LTR_SCC},
150 	{"ISH",			SPT_PMC_LTR_ISH},
151 	/* Below two cannot be used for LTR_IGNORE */
152 	{"CURRENT_PLATFORM",	SPT_PMC_LTR_CUR_PLT},
153 	{"AGGREGATED_SYSTEM",	SPT_PMC_LTR_CUR_ASLT},
154 	{}
155 };
156 
157 static const struct pmc_reg_map spt_reg_map = {
158 	.pfear_sts = ext_spt_pfear_map,
159 	.mphy_sts = spt_mphy_map,
160 	.pll_sts = spt_pll_map,
161 	.ltr_show_sts = spt_ltr_show_map,
162 	.msr_sts = msr_map,
163 	.slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
164 	.slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
165 	.ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
166 	.regmap_length = SPT_PMC_MMIO_REG_LEN,
167 	.ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A,
168 	.ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
169 	.pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
170 	.pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
171 	.ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
172 	.pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
173 };
174 
175 /* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
176 static const struct pmc_bit_map cnp_pfear_map[] = {
177 	{"PMC",                 BIT(0)},
178 	{"OPI-DMI",             BIT(1)},
179 	{"SPI/eSPI",            BIT(2)},
180 	{"XHCI",                BIT(3)},
181 	{"SPA",                 BIT(4)},
182 	{"SPB",                 BIT(5)},
183 	{"SPC",                 BIT(6)},
184 	{"GBE",                 BIT(7)},
185 
186 	{"SATA",                BIT(0)},
187 	{"HDA_PGD0",            BIT(1)},
188 	{"HDA_PGD1",            BIT(2)},
189 	{"HDA_PGD2",            BIT(3)},
190 	{"HDA_PGD3",            BIT(4)},
191 	{"SPD",                 BIT(5)},
192 	{"LPSS",                BIT(6)},
193 	{"LPC",                 BIT(7)},
194 
195 	{"SMB",                 BIT(0)},
196 	{"ISH",                 BIT(1)},
197 	{"P2SB",                BIT(2)},
198 	{"NPK_VNN",             BIT(3)},
199 	{"SDX",                 BIT(4)},
200 	{"SPE",                 BIT(5)},
201 	{"Fuse",                BIT(6)},
202 	{"SBR8",		BIT(7)},
203 
204 	{"CSME_FSC",            BIT(0)},
205 	{"USB3_OTG",            BIT(1)},
206 	{"EXI",                 BIT(2)},
207 	{"CSE",                 BIT(3)},
208 	{"CSME_KVM",            BIT(4)},
209 	{"CSME_PMT",            BIT(5)},
210 	{"CSME_CLINK",          BIT(6)},
211 	{"CSME_PTIO",           BIT(7)},
212 
213 	{"CSME_USBR",           BIT(0)},
214 	{"CSME_SUSRAM",         BIT(1)},
215 	{"CSME_SMT1",           BIT(2)},
216 	{"CSME_SMT4",           BIT(3)},
217 	{"CSME_SMS2",           BIT(4)},
218 	{"CSME_SMS1",           BIT(5)},
219 	{"CSME_RTC",            BIT(6)},
220 	{"CSME_PSF",            BIT(7)},
221 
222 	{"SBR0",                BIT(0)},
223 	{"SBR1",                BIT(1)},
224 	{"SBR2",                BIT(2)},
225 	{"SBR3",                BIT(3)},
226 	{"SBR4",                BIT(4)},
227 	{"SBR5",                BIT(5)},
228 	{"CSME_PECI",           BIT(6)},
229 	{"PSF1",                BIT(7)},
230 
231 	{"PSF2",                BIT(0)},
232 	{"PSF3",                BIT(1)},
233 	{"PSF4",                BIT(2)},
234 	{"CNVI",                BIT(3)},
235 	{"UFS0",                BIT(4)},
236 	{"EMMC",                BIT(5)},
237 	{"SPF",			BIT(6)},
238 	{"SBR6",                BIT(7)},
239 
240 	{"SBR7",                BIT(0)},
241 	{"NPK_AON",             BIT(1)},
242 	{"HDA_PGD4",            BIT(2)},
243 	{"HDA_PGD5",            BIT(3)},
244 	{"HDA_PGD6",            BIT(4)},
245 	{"PSF6",		BIT(5)},
246 	{"PSF7",		BIT(6)},
247 	{"PSF8",		BIT(7)},
248 	{}
249 };
250 
251 static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
252 	/*
253 	 * Check intel_pmc_core_ids[] users of cnp_reg_map for
254 	 * a list of core SoCs using this.
255 	 */
256 	cnp_pfear_map,
257 	NULL
258 };
259 
260 static const struct pmc_bit_map icl_pfear_map[] = {
261 	{"RES_65",		BIT(0)},
262 	{"RES_66",		BIT(1)},
263 	{"RES_67",		BIT(2)},
264 	{"TAM",			BIT(3)},
265 	{"GBETSN",		BIT(4)},
266 	{"TBTLSX",		BIT(5)},
267 	{"RES_71",		BIT(6)},
268 	{"RES_72",		BIT(7)},
269 	{}
270 };
271 
272 static const struct pmc_bit_map *ext_icl_pfear_map[] = {
273 	/*
274 	 * Check intel_pmc_core_ids[] users of icl_reg_map for
275 	 * a list of core SoCs using this.
276 	 */
277 	cnp_pfear_map,
278 	icl_pfear_map,
279 	NULL
280 };
281 
282 static const struct pmc_bit_map tgl_pfear_map[] = {
283 	{"PSF9",		BIT(0)},
284 	{"RES_66",		BIT(1)},
285 	{"RES_67",		BIT(2)},
286 	{"RES_68",		BIT(3)},
287 	{"RES_69",		BIT(4)},
288 	{"RES_70",		BIT(5)},
289 	{"TBTLSX",		BIT(6)},
290 	{}
291 };
292 
293 static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
294 	/*
295 	 * Check intel_pmc_core_ids[] users of tgl_reg_map for
296 	 * a list of core SoCs using this.
297 	 */
298 	cnp_pfear_map,
299 	tgl_pfear_map,
300 	NULL
301 };
302 
303 static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
304 	{"AUDIO_D3",		BIT(0)},
305 	{"OTG_D3",		BIT(1)},
306 	{"XHCI_D3",		BIT(2)},
307 	{"LPIO_D3",		BIT(3)},
308 	{"SDX_D3",		BIT(4)},
309 	{"SATA_D3",		BIT(5)},
310 	{"UFS0_D3",		BIT(6)},
311 	{"UFS1_D3",		BIT(7)},
312 	{"EMMC_D3",		BIT(8)},
313 	{}
314 };
315 
316 static const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
317 	{"SDIO_PLL_OFF",	BIT(0)},
318 	{"USB2_PLL_OFF",	BIT(1)},
319 	{"AUDIO_PLL_OFF",	BIT(2)},
320 	{"OC_PLL_OFF",		BIT(3)},
321 	{"MAIN_PLL_OFF",	BIT(4)},
322 	{"XOSC_OFF",		BIT(5)},
323 	{"LPC_CLKS_GATED",	BIT(6)},
324 	{"PCIE_CLKREQS_IDLE",	BIT(7)},
325 	{"AUDIO_ROSC_OFF",	BIT(8)},
326 	{"HPET_XOSC_CLK_REQ",	BIT(9)},
327 	{"PMC_ROSC_SLOW_CLK",	BIT(10)},
328 	{"AON2_ROSC_GATED",	BIT(11)},
329 	{"CLKACKS_DEASSERTED",	BIT(12)},
330 	{}
331 };
332 
333 static const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
334 	{"MPHY_CORE_GATED",	BIT(0)},
335 	{"CSME_GATED",		BIT(1)},
336 	{"USB2_SUS_GATED",	BIT(2)},
337 	{"DYN_FLEX_IO_IDLE",	BIT(3)},
338 	{"GBE_NO_LINK",		BIT(4)},
339 	{"THERM_SEN_DISABLED",	BIT(5)},
340 	{"PCIE_LOW_POWER",	BIT(6)},
341 	{"ISH_VNNAON_REQ_ACT",	BIT(7)},
342 	{"ISH_VNN_REQ_ACT",	BIT(8)},
343 	{"CNV_VNNAON_REQ_ACT",	BIT(9)},
344 	{"CNV_VNN_REQ_ACT",	BIT(10)},
345 	{"NPK_VNNON_REQ_ACT",	BIT(11)},
346 	{"PMSYNC_STATE_IDLE",	BIT(12)},
347 	{"ALST_GT_THRES",	BIT(13)},
348 	{"PMC_ARC_PG_READY",	BIT(14)},
349 	{}
350 };
351 
352 static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
353 	cnp_slps0_dbg0_map,
354 	cnp_slps0_dbg1_map,
355 	cnp_slps0_dbg2_map,
356 	NULL
357 };
358 
359 static const struct pmc_bit_map cnp_ltr_show_map[] = {
360 	{"SOUTHPORT_A",		CNP_PMC_LTR_SPA},
361 	{"SOUTHPORT_B",		CNP_PMC_LTR_SPB},
362 	{"SATA",		CNP_PMC_LTR_SATA},
363 	{"GIGABIT_ETHERNET",	CNP_PMC_LTR_GBE},
364 	{"XHCI",		CNP_PMC_LTR_XHCI},
365 	{"Reserved",		CNP_PMC_LTR_RESERVED},
366 	{"ME",			CNP_PMC_LTR_ME},
367 	/* EVA is Enterprise Value Add, doesn't really exist on PCH */
368 	{"EVA",			CNP_PMC_LTR_EVA},
369 	{"SOUTHPORT_C",		CNP_PMC_LTR_SPC},
370 	{"HD_AUDIO",		CNP_PMC_LTR_AZ},
371 	{"CNV",			CNP_PMC_LTR_CNV},
372 	{"LPSS",		CNP_PMC_LTR_LPSS},
373 	{"SOUTHPORT_D",		CNP_PMC_LTR_SPD},
374 	{"SOUTHPORT_E",		CNP_PMC_LTR_SPE},
375 	{"CAMERA",		CNP_PMC_LTR_CAM},
376 	{"ESPI",		CNP_PMC_LTR_ESPI},
377 	{"SCC",			CNP_PMC_LTR_SCC},
378 	{"ISH",			CNP_PMC_LTR_ISH},
379 	{"UFSX2",		CNP_PMC_LTR_UFSX2},
380 	{"EMMC",		CNP_PMC_LTR_EMMC},
381 	/*
382 	 * Check intel_pmc_core_ids[] users of cnp_reg_map for
383 	 * a list of core SoCs using this.
384 	 */
385 	{"WIGIG",		ICL_PMC_LTR_WIGIG},
386 	{"THC0",                TGL_PMC_LTR_THC0},
387 	{"THC1",                TGL_PMC_LTR_THC1},
388 	/* Below two cannot be used for LTR_IGNORE */
389 	{"CURRENT_PLATFORM",	CNP_PMC_LTR_CUR_PLT},
390 	{"AGGREGATED_SYSTEM",	CNP_PMC_LTR_CUR_ASLT},
391 	{}
392 };
393 
394 static const struct pmc_reg_map cnp_reg_map = {
395 	.pfear_sts = ext_cnp_pfear_map,
396 	.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
397 	.slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
398 	.slps0_dbg_maps = cnp_slps0_dbg_maps,
399 	.ltr_show_sts = cnp_ltr_show_map,
400 	.msr_sts = msr_map,
401 	.slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
402 	.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
403 	.regmap_length = CNP_PMC_MMIO_REG_LEN,
404 	.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
405 	.ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
406 	.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
407 	.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
408 	.ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
409 	.etr3_offset = ETR3_OFFSET,
410 };
411 
412 static const struct pmc_reg_map icl_reg_map = {
413 	.pfear_sts = ext_icl_pfear_map,
414 	.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
415 	.slp_s0_res_counter_step = ICL_PMC_SLP_S0_RES_COUNTER_STEP,
416 	.slps0_dbg_maps = cnp_slps0_dbg_maps,
417 	.ltr_show_sts = cnp_ltr_show_map,
418 	.msr_sts = msr_map,
419 	.slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
420 	.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
421 	.regmap_length = CNP_PMC_MMIO_REG_LEN,
422 	.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
423 	.ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
424 	.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
425 	.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
426 	.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
427 	.etr3_offset = ETR3_OFFSET,
428 };
429 
430 static const struct pmc_bit_map tgl_clocksource_status_map[] = {
431 	{"USB2PLL_OFF_STS",			BIT(18)},
432 	{"PCIe/USB3.1_Gen2PLL_OFF_STS",		BIT(19)},
433 	{"PCIe_Gen3PLL_OFF_STS",		BIT(20)},
434 	{"OPIOPLL_OFF_STS",			BIT(21)},
435 	{"OCPLL_OFF_STS",			BIT(22)},
436 	{"MainPLL_OFF_STS",			BIT(23)},
437 	{"MIPIPLL_OFF_STS",			BIT(24)},
438 	{"Fast_XTAL_Osc_OFF_STS",		BIT(25)},
439 	{"AC_Ring_Osc_OFF_STS",			BIT(26)},
440 	{"MC_Ring_Osc_OFF_STS",			BIT(27)},
441 	{"SATAPLL_OFF_STS",			BIT(29)},
442 	{"XTAL_USB2PLL_OFF_STS",		BIT(31)},
443 	{}
444 };
445 
446 static const struct pmc_bit_map tgl_power_gating_status_map[] = {
447 	{"CSME_PG_STS",				BIT(0)},
448 	{"SATA_PG_STS",				BIT(1)},
449 	{"xHCI_PG_STS",				BIT(2)},
450 	{"UFSX2_PG_STS",			BIT(3)},
451 	{"OTG_PG_STS",				BIT(5)},
452 	{"SPA_PG_STS",				BIT(6)},
453 	{"SPB_PG_STS",				BIT(7)},
454 	{"SPC_PG_STS",				BIT(8)},
455 	{"SPD_PG_STS",				BIT(9)},
456 	{"SPE_PG_STS",				BIT(10)},
457 	{"SPF_PG_STS",				BIT(11)},
458 	{"LSX_PG_STS",				BIT(13)},
459 	{"P2SB_PG_STS",				BIT(14)},
460 	{"PSF_PG_STS",				BIT(15)},
461 	{"SBR_PG_STS",				BIT(16)},
462 	{"OPIDMI_PG_STS",			BIT(17)},
463 	{"THC0_PG_STS",				BIT(18)},
464 	{"THC1_PG_STS",				BIT(19)},
465 	{"GBETSN_PG_STS",			BIT(20)},
466 	{"GBE_PG_STS",				BIT(21)},
467 	{"LPSS_PG_STS",				BIT(22)},
468 	{"MMP_UFSX2_PG_STS",			BIT(23)},
469 	{"MMP_UFSX2B_PG_STS",			BIT(24)},
470 	{"FIA_PG_STS",				BIT(25)},
471 	{}
472 };
473 
474 static const struct pmc_bit_map tgl_d3_status_map[] = {
475 	{"ADSP_D3_STS",				BIT(0)},
476 	{"SATA_D3_STS",				BIT(1)},
477 	{"xHCI0_D3_STS",			BIT(2)},
478 	{"xDCI1_D3_STS",			BIT(5)},
479 	{"SDX_D3_STS",				BIT(6)},
480 	{"EMMC_D3_STS",				BIT(7)},
481 	{"IS_D3_STS",				BIT(8)},
482 	{"THC0_D3_STS",				BIT(9)},
483 	{"THC1_D3_STS",				BIT(10)},
484 	{"GBE_D3_STS",				BIT(11)},
485 	{"GBE_TSN_D3_STS",			BIT(12)},
486 	{}
487 };
488 
489 static const struct pmc_bit_map tgl_vnn_req_status_map[] = {
490 	{"GPIO_COM0_VNN_REQ_STS",		BIT(1)},
491 	{"GPIO_COM1_VNN_REQ_STS",		BIT(2)},
492 	{"GPIO_COM2_VNN_REQ_STS",		BIT(3)},
493 	{"GPIO_COM3_VNN_REQ_STS",		BIT(4)},
494 	{"GPIO_COM4_VNN_REQ_STS",		BIT(5)},
495 	{"GPIO_COM5_VNN_REQ_STS",		BIT(6)},
496 	{"Audio_VNN_REQ_STS",			BIT(7)},
497 	{"ISH_VNN_REQ_STS",			BIT(8)},
498 	{"CNVI_VNN_REQ_STS",			BIT(9)},
499 	{"eSPI_VNN_REQ_STS",			BIT(10)},
500 	{"Display_VNN_REQ_STS",			BIT(11)},
501 	{"DTS_VNN_REQ_STS",			BIT(12)},
502 	{"SMBUS_VNN_REQ_STS",			BIT(14)},
503 	{"CSME_VNN_REQ_STS",			BIT(15)},
504 	{"SMLINK0_VNN_REQ_STS",			BIT(16)},
505 	{"SMLINK1_VNN_REQ_STS",			BIT(17)},
506 	{"CLINK_VNN_REQ_STS",			BIT(20)},
507 	{"DCI_VNN_REQ_STS",			BIT(21)},
508 	{"ITH_VNN_REQ_STS",			BIT(22)},
509 	{"CSME_VNN_REQ_STS",			BIT(24)},
510 	{"GBE_VNN_REQ_STS",			BIT(25)},
511 	{}
512 };
513 
514 static const struct pmc_bit_map tgl_vnn_misc_status_map[] = {
515 	{"CPU_C10_REQ_STS_0",			BIT(0)},
516 	{"PCIe_LPM_En_REQ_STS_3",		BIT(3)},
517 	{"ITH_REQ_STS_5",			BIT(5)},
518 	{"CNVI_REQ_STS_6",			BIT(6)},
519 	{"ISH_REQ_STS_7",			BIT(7)},
520 	{"USB2_SUS_PG_Sys_REQ_STS_10",		BIT(10)},
521 	{"PCIe_Clk_REQ_STS_12",			BIT(12)},
522 	{"MPHY_Core_DL_REQ_STS_16",		BIT(16)},
523 	{"Break-even_En_REQ_STS_17",		BIT(17)},
524 	{"Auto-demo_En_REQ_STS_18",		BIT(18)},
525 	{"MPHY_SUS_REQ_STS_22",			BIT(22)},
526 	{"xDCI_attached_REQ_STS_24",		BIT(24)},
527 	{}
528 };
529 
530 static const struct pmc_bit_map tgl_signal_status_map[] = {
531 	{"LSX_Wake0_En_STS",			BIT(0)},
532 	{"LSX_Wake0_Pol_STS",			BIT(1)},
533 	{"LSX_Wake1_En_STS",			BIT(2)},
534 	{"LSX_Wake1_Pol_STS",			BIT(3)},
535 	{"LSX_Wake2_En_STS",			BIT(4)},
536 	{"LSX_Wake2_Pol_STS",			BIT(5)},
537 	{"LSX_Wake3_En_STS",			BIT(6)},
538 	{"LSX_Wake3_Pol_STS",			BIT(7)},
539 	{"LSX_Wake4_En_STS",			BIT(8)},
540 	{"LSX_Wake4_Pol_STS",			BIT(9)},
541 	{"LSX_Wake5_En_STS",			BIT(10)},
542 	{"LSX_Wake5_Pol_STS",			BIT(11)},
543 	{"LSX_Wake6_En_STS",			BIT(12)},
544 	{"LSX_Wake6_Pol_STS",			BIT(13)},
545 	{"LSX_Wake7_En_STS",			BIT(14)},
546 	{"LSX_Wake7_Pol_STS",			BIT(15)},
547 	{"Intel_Se_IO_Wake0_En_STS",		BIT(16)},
548 	{"Intel_Se_IO_Wake0_Pol_STS",		BIT(17)},
549 	{"Intel_Se_IO_Wake1_En_STS",		BIT(18)},
550 	{"Intel_Se_IO_Wake1_Pol_STS",		BIT(19)},
551 	{"Int_Timer_SS_Wake0_En_STS",		BIT(20)},
552 	{"Int_Timer_SS_Wake0_Pol_STS",		BIT(21)},
553 	{"Int_Timer_SS_Wake1_En_STS",		BIT(22)},
554 	{"Int_Timer_SS_Wake1_Pol_STS",		BIT(23)},
555 	{"Int_Timer_SS_Wake2_En_STS",		BIT(24)},
556 	{"Int_Timer_SS_Wake2_Pol_STS",		BIT(25)},
557 	{"Int_Timer_SS_Wake3_En_STS",		BIT(26)},
558 	{"Int_Timer_SS_Wake3_Pol_STS",		BIT(27)},
559 	{"Int_Timer_SS_Wake4_En_STS",		BIT(28)},
560 	{"Int_Timer_SS_Wake4_Pol_STS",		BIT(29)},
561 	{"Int_Timer_SS_Wake5_En_STS",		BIT(30)},
562 	{"Int_Timer_SS_Wake5_Pol_STS",		BIT(31)},
563 	{}
564 };
565 
566 static const struct pmc_bit_map *tgl_lpm_maps[] = {
567 	tgl_clocksource_status_map,
568 	tgl_power_gating_status_map,
569 	tgl_d3_status_map,
570 	tgl_vnn_req_status_map,
571 	tgl_vnn_misc_status_map,
572 	tgl_signal_status_map,
573 	NULL
574 };
575 
576 static const struct pmc_reg_map tgl_reg_map = {
577 	.pfear_sts = ext_tgl_pfear_map,
578 	.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
579 	.slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
580 	.ltr_show_sts = cnp_ltr_show_map,
581 	.msr_sts = msr_map,
582 	.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
583 	.regmap_length = CNP_PMC_MMIO_REG_LEN,
584 	.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
585 	.ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
586 	.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
587 	.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
588 	.ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
589 	.lpm_num_maps = TGL_LPM_NUM_MAPS,
590 	.lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
591 	.lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET,
592 	.lpm_en_offset = TGL_LPM_EN_OFFSET,
593 	.lpm_priority_offset = TGL_LPM_PRI_OFFSET,
594 	.lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET,
595 	.lpm_sts = tgl_lpm_maps,
596 	.lpm_status_offset = TGL_LPM_STATUS_OFFSET,
597 	.lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET,
598 	.etr3_offset = ETR3_OFFSET,
599 };
600 
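/*
 * On Tiger Lake the low power mode requirement registers are exposed through
 * an ACPI _DSM (ACPI_S0IX_DSM_UUID, function ACPI_GET_LOW_MODE_REGISTERS).
 * The returned buffer is expected to hold LPM_MAX_NUM_MODES * lpm_num_maps
 * u32 values, which are cached in pmcdev->lpm_req_regs and later rendered by
 * pmc_core_substate_req_regs_show().
 */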
601 static void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev)
602 {
603 	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
604 	const int num_maps = pmcdev->map->lpm_num_maps;
605 	u32 lpm_size = LPM_MAX_NUM_MODES * num_maps * 4;
606 	union acpi_object *out_obj;
607 	struct acpi_device *adev;
608 	guid_t s0ix_dsm_guid;
609 	u32 *lpm_req_regs, *addr;
610 
611 	adev = ACPI_COMPANION(&pdev->dev);
612 	if (!adev)
613 		return;
614 
615 	guid_parse(ACPI_S0IX_DSM_UUID, &s0ix_dsm_guid);
616 
617 	out_obj = acpi_evaluate_dsm(adev->handle, &s0ix_dsm_guid, 0,
618 				    ACPI_GET_LOW_MODE_REGISTERS, NULL);
619 	if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
620 		u32 size = out_obj->buffer.length;
621 
622 		if (size != lpm_size) {
623 			acpi_handle_debug(adev->handle,
624 				"_DSM returned unexpected buffer size, have %u, expect %u\n",
625 				size, lpm_size);
626 			goto free_acpi_obj;
627 		}
628 	} else {
629 		acpi_handle_debug(adev->handle,
630 				  "_DSM function 0 evaluation failed\n");
631 		goto free_acpi_obj;
632 	}
633 
634 	addr = (u32 *)out_obj->buffer.pointer;
635 
636 	lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size * sizeof(u32),
637 				     GFP_KERNEL);
638 	if (!lpm_req_regs)
639 		goto free_acpi_obj;
640 
641 	memcpy(lpm_req_regs, addr, lpm_size);
642 	pmcdev->lpm_req_regs = lpm_req_regs;
643 
644 free_acpi_obj:
645 	ACPI_FREE(out_obj);
646 }
647 
648 /* Alder Lake: PGD PFET Enable Ack Status Register(s) bitmap */
649 static const struct pmc_bit_map adl_pfear_map[] = {
650 	{"SPI/eSPI",		BIT(2)},
651 	{"XHCI",		BIT(3)},
652 	{"SPA",			BIT(4)},
653 	{"SPB",			BIT(5)},
654 	{"SPC",			BIT(6)},
655 	{"GBE",			BIT(7)},
656 
657 	{"SATA",		BIT(0)},
658 	{"HDA_PGD0",		BIT(1)},
659 	{"HDA_PGD1",		BIT(2)},
660 	{"HDA_PGD2",		BIT(3)},
661 	{"HDA_PGD3",		BIT(4)},
662 	{"SPD",			BIT(5)},
663 	{"LPSS",		BIT(6)},
664 
665 	{"SMB",			BIT(0)},
666 	{"ISH",			BIT(1)},
667 	{"ITH",			BIT(3)},
668 
669 	{"XDCI",		BIT(1)},
670 	{"DCI",			BIT(2)},
671 	{"CSE",			BIT(3)},
672 	{"CSME_KVM",		BIT(4)},
673 	{"CSME_PMT",		BIT(5)},
674 	{"CSME_CLINK",		BIT(6)},
675 	{"CSME_PTIO",		BIT(7)},
676 
677 	{"CSME_USBR",		BIT(0)},
678 	{"CSME_SUSRAM",		BIT(1)},
679 	{"CSME_SMT1",		BIT(2)},
680 	{"CSME_SMS2",		BIT(4)},
681 	{"CSME_SMS1",		BIT(5)},
682 	{"CSME_RTC",		BIT(6)},
683 	{"CSME_PSF",		BIT(7)},
684 
685 	{"CNVI",		BIT(3)},
686 
687 	{"HDA_PGD4",		BIT(2)},
688 	{"HDA_PGD5",		BIT(3)},
689 	{"HDA_PGD6",		BIT(4)},
690 	{}
691 };
692 
693 static const struct pmc_bit_map *ext_adl_pfear_map[] = {
694 	/*
695 	 * Check intel_pmc_core_ids[] users of cnp_reg_map for
696 	 * a list of core SoCs using this.
697 	 */
698 	adl_pfear_map,
699 	NULL
700 };
701 
702 static const struct pmc_bit_map adl_ltr_show_map[] = {
703 	{"SOUTHPORT_A",		CNP_PMC_LTR_SPA},
704 	{"SOUTHPORT_B",		CNP_PMC_LTR_SPB},
705 	{"SATA",		CNP_PMC_LTR_SATA},
706 	{"GIGABIT_ETHERNET",	CNP_PMC_LTR_GBE},
707 	{"XHCI",		CNP_PMC_LTR_XHCI},
708 	{"SOUTHPORT_F",		ADL_PMC_LTR_SPF},
709 	{"ME",			CNP_PMC_LTR_ME},
710 	/* EVA is Enterprise Value Add, doesn't really exist on PCH */
711 	{"SATA1",		CNP_PMC_LTR_EVA},
712 	{"SOUTHPORT_C",		CNP_PMC_LTR_SPC},
713 	{"HD_AUDIO",		CNP_PMC_LTR_AZ},
714 	{"CNV",			CNP_PMC_LTR_CNV},
715 	{"LPSS",		CNP_PMC_LTR_LPSS},
716 	{"SOUTHPORT_D",		CNP_PMC_LTR_SPD},
717 	{"SOUTHPORT_E",		CNP_PMC_LTR_SPE},
718 	{"SATA2",		CNP_PMC_LTR_CAM},
719 	{"ESPI",		CNP_PMC_LTR_ESPI},
720 	{"SCC",			CNP_PMC_LTR_SCC},
721 	{"ISH",			CNP_PMC_LTR_ISH},
722 	{"UFSX2",		CNP_PMC_LTR_UFSX2},
723 	{"EMMC",		CNP_PMC_LTR_EMMC},
724 	/*
725 	 * Check intel_pmc_core_ids[] users of cnp_reg_map for
726 	 * a list of core SoCs using this.
727 	 */
728 	{"WIGIG",		ICL_PMC_LTR_WIGIG},
729 	{"THC0",		TGL_PMC_LTR_THC0},
730 	{"THC1",		TGL_PMC_LTR_THC1},
731 	{"SOUTHPORT_G",		CNP_PMC_LTR_RESERVED},
732 
733 	/* Below two cannot be used for LTR_IGNORE */
734 	{"CURRENT_PLATFORM",	CNP_PMC_LTR_CUR_PLT},
735 	{"AGGREGATED_SYSTEM",	CNP_PMC_LTR_CUR_ASLT},
736 	{}
737 };
738 
739 static const struct pmc_bit_map adl_clocksource_status_map[] = {
740 	{"CLKPART1_OFF_STS",			BIT(0)},
741 	{"CLKPART2_OFF_STS",			BIT(1)},
742 	{"CLKPART3_OFF_STS",			BIT(2)},
743 	{"CLKPART4_OFF_STS",			BIT(3)},
744 	{"CLKPART5_OFF_STS",			BIT(4)},
745 	{"CLKPART6_OFF_STS",			BIT(5)},
746 	{"CLKPART7_OFF_STS",			BIT(6)},
747 	{"CLKPART8_OFF_STS",			BIT(7)},
748 	{"PCIE0PLL_OFF_STS",			BIT(10)},
749 	{"PCIE1PLL_OFF_STS",			BIT(11)},
750 	{"PCIE2PLL_OFF_STS",			BIT(12)},
751 	{"PCIE3PLL_OFF_STS",			BIT(13)},
752 	{"PCIE4PLL_OFF_STS",			BIT(14)},
753 	{"PCIE5PLL_OFF_STS",			BIT(15)},
754 	{"PCIE6PLL_OFF_STS",			BIT(16)},
755 	{"USB2PLL_OFF_STS",			BIT(18)},
756 	{"OCPLL_OFF_STS",			BIT(22)},
757 	{"AUDIOPLL_OFF_STS",			BIT(23)},
758 	{"GBEPLL_OFF_STS",			BIT(24)},
759 	{"Fast_XTAL_Osc_OFF_STS",		BIT(25)},
760 	{"AC_Ring_Osc_OFF_STS",			BIT(26)},
761 	{"MC_Ring_Osc_OFF_STS",			BIT(27)},
762 	{"SATAPLL_OFF_STS",			BIT(29)},
763 	{"USB3PLL_OFF_STS",			BIT(31)},
764 	{}
765 };
766 
767 static const struct pmc_bit_map adl_power_gating_status_0_map[] = {
768 	{"PMC_PGD0_PG_STS",			BIT(0)},
769 	{"DMI_PGD0_PG_STS",			BIT(1)},
770 	{"ESPISPI_PGD0_PG_STS",			BIT(2)},
771 	{"XHCI_PGD0_PG_STS",			BIT(3)},
772 	{"SPA_PGD0_PG_STS",			BIT(4)},
773 	{"SPB_PGD0_PG_STS",			BIT(5)},
774 	{"SPC_PGD0_PG_STS",			BIT(6)},
775 	{"GBE_PGD0_PG_STS",			BIT(7)},
776 	{"SATA_PGD0_PG_STS",			BIT(8)},
777 	{"DSP_PGD0_PG_STS",			BIT(9)},
778 	{"DSP_PGD1_PG_STS",			BIT(10)},
779 	{"DSP_PGD2_PG_STS",			BIT(11)},
780 	{"DSP_PGD3_PG_STS",			BIT(12)},
781 	{"SPD_PGD0_PG_STS",			BIT(13)},
782 	{"LPSS_PGD0_PG_STS",			BIT(14)},
783 	{"SMB_PGD0_PG_STS",			BIT(16)},
784 	{"ISH_PGD0_PG_STS",			BIT(17)},
785 	{"NPK_PGD0_PG_STS",			BIT(19)},
786 	{"PECI_PGD0_PG_STS",			BIT(21)},
787 	{"XDCI_PGD0_PG_STS",			BIT(25)},
788 	{"EXI_PGD0_PG_STS",			BIT(26)},
789 	{"CSE_PGD0_PG_STS",			BIT(27)},
790 	{"KVMCC_PGD0_PG_STS",			BIT(28)},
791 	{"PMT_PGD0_PG_STS",			BIT(29)},
792 	{"CLINK_PGD0_PG_STS",			BIT(30)},
793 	{"PTIO_PGD0_PG_STS",			BIT(31)},
794 	{}
795 };
796 
797 static const struct pmc_bit_map adl_power_gating_status_1_map[] = {
798 	{"USBR0_PGD0_PG_STS",			BIT(0)},
799 	{"SMT1_PGD0_PG_STS",			BIT(2)},
800 	{"CSMERTC_PGD0_PG_STS",			BIT(6)},
801 	{"CSMEPSF_PGD0_PG_STS",			BIT(7)},
802 	{"CNVI_PGD0_PG_STS",			BIT(19)},
803 	{"DSP_PGD4_PG_STS",			BIT(26)},
804 	{"SPG_PGD0_PG_STS",			BIT(27)},
805 	{"SPE_PGD0_PG_STS",			BIT(28)},
806 	{}
807 };
808 
809 static const struct pmc_bit_map adl_power_gating_status_2_map[] = {
810 	{"THC0_PGD0_PG_STS",			BIT(7)},
811 	{"THC1_PGD0_PG_STS",			BIT(8)},
812 	{"SPF_PGD0_PG_STS",			BIT(14)},
813 	{}
814 };
815 
816 static const struct pmc_bit_map adl_d3_status_0_map[] = {
817 	{"ISH_D3_STS",				BIT(2)},
818 	{"LPSS_D3_STS",				BIT(3)},
819 	{"XDCI_D3_STS",				BIT(4)},
820 	{"XHCI_D3_STS",				BIT(5)},
821 	{"SPA_D3_STS",				BIT(12)},
822 	{"SPB_D3_STS",				BIT(13)},
823 	{"SPC_D3_STS",				BIT(14)},
824 	{"SPD_D3_STS",				BIT(15)},
825 	{"SPE_D3_STS",				BIT(16)},
826 	{"DSP_D3_STS",				BIT(19)},
827 	{"SATA_D3_STS",				BIT(20)},
828 	{"DMI_D3_STS",				BIT(22)},
829 	{}
830 };
831 
832 static const struct pmc_bit_map adl_d3_status_1_map[] = {
833 	{"GBE_D3_STS",				BIT(19)},
834 	{"CNVI_D3_STS",				BIT(27)},
835 	{}
836 };
837 
838 static const struct pmc_bit_map adl_d3_status_2_map[] = {
839 	{"CSMERTC_D3_STS",			BIT(1)},
840 	{"CSE_D3_STS",				BIT(4)},
841 	{"KVMCC_D3_STS",			BIT(5)},
842 	{"USBR0_D3_STS",			BIT(6)},
843 	{"SMT1_D3_STS",				BIT(8)},
844 	{"PTIO_D3_STS",				BIT(16)},
845 	{"PMT_D3_STS",				BIT(17)},
846 	{}
847 };
848 
849 static const struct pmc_bit_map adl_d3_status_3_map[] = {
850 	{"THC0_D3_STS",				BIT(14)},
851 	{"THC1_D3_STS",				BIT(15)},
852 	{}
853 };
854 
855 static const struct pmc_bit_map adl_vnn_req_status_0_map[] = {
856 	{"ISH_VNN_REQ_STS",			BIT(2)},
857 	{"ESPISPI_VNN_REQ_STS",			BIT(18)},
858 	{"DSP_VNN_REQ_STS",			BIT(19)},
859 	{}
860 };
861 
862 static const struct pmc_bit_map adl_vnn_req_status_1_map[] = {
863 	{"NPK_VNN_REQ_STS",			BIT(4)},
864 	{"EXI_VNN_REQ_STS",			BIT(9)},
865 	{"GBE_VNN_REQ_STS",			BIT(19)},
866 	{"SMB_VNN_REQ_STS",			BIT(25)},
867 	{"CNVI_VNN_REQ_STS",			BIT(27)},
868 	{}
869 };
870 
871 static const struct pmc_bit_map adl_vnn_req_status_2_map[] = {
872 	{"CSMERTC_VNN_REQ_STS",			BIT(1)},
873 	{"CSE_VNN_REQ_STS",			BIT(4)},
874 	{"SMT1_VNN_REQ_STS",			BIT(8)},
875 	{"CLINK_VNN_REQ_STS",			BIT(14)},
876 	{"GPIOCOM4_VNN_REQ_STS",		BIT(20)},
877 	{"GPIOCOM3_VNN_REQ_STS",		BIT(21)},
878 	{"GPIOCOM2_VNN_REQ_STS",		BIT(22)},
879 	{"GPIOCOM1_VNN_REQ_STS",		BIT(23)},
880 	{"GPIOCOM0_VNN_REQ_STS",		BIT(24)},
881 	{}
882 };
883 
884 static const struct pmc_bit_map adl_vnn_req_status_3_map[] = {
885 	{"GPIOCOM5_VNN_REQ_STS",		BIT(11)},
886 	{}
887 };
888 
889 static const struct pmc_bit_map adl_vnn_misc_status_map[] = {
890 	{"CPU_C10_REQ_STS",			BIT(0)},
891 	{"PCIe_LPM_En_REQ_STS",			BIT(3)},
892 	{"ITH_REQ_STS",				BIT(5)},
893 	{"CNVI_REQ_STS",			BIT(6)},
894 	{"ISH_REQ_STS",				BIT(7)},
895 	{"USB2_SUS_PG_Sys_REQ_STS",		BIT(10)},
896 	{"PCIe_Clk_REQ_STS",			BIT(12)},
897 	{"MPHY_Core_DL_REQ_STS",		BIT(16)},
898 	{"Break-even_En_REQ_STS",		BIT(17)},
899 	{"MPHY_SUS_REQ_STS",			BIT(22)},
900 	{"xDCI_attached_REQ_STS",		BIT(24)},
901 	{}
902 };
903 
904 static const struct pmc_bit_map *adl_lpm_maps[] = {
905 	adl_clocksource_status_map,
906 	adl_power_gating_status_0_map,
907 	adl_power_gating_status_1_map,
908 	adl_power_gating_status_2_map,
909 	adl_d3_status_0_map,
910 	adl_d3_status_1_map,
911 	adl_d3_status_2_map,
912 	adl_d3_status_3_map,
913 	adl_vnn_req_status_0_map,
914 	adl_vnn_req_status_1_map,
915 	adl_vnn_req_status_2_map,
916 	adl_vnn_req_status_3_map,
917 	adl_vnn_misc_status_map,
918 	tgl_signal_status_map,
919 	NULL
920 };
921 
922 static const struct pmc_reg_map adl_reg_map = {
923 	.pfear_sts = ext_adl_pfear_map,
924 	.slp_s0_offset = ADL_PMC_SLP_S0_RES_COUNTER_OFFSET,
925 	.slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
926 	.ltr_show_sts = adl_ltr_show_map,
927 	.msr_sts = msr_map,
928 	.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
929 	.regmap_length = CNP_PMC_MMIO_REG_LEN,
930 	.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
931 	.ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
932 	.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
933 	.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
934 	.ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
935 	.lpm_num_modes = ADL_LPM_NUM_MODES,
936 	.lpm_num_maps = ADL_LPM_NUM_MAPS,
937 	.lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
938 	.etr3_offset = ETR3_OFFSET,
939 	.lpm_sts_latch_en_offset = ADL_LPM_STATUS_LATCH_EN_OFFSET,
940 	.lpm_priority_offset = ADL_LPM_PRI_OFFSET,
941 	.lpm_en_offset = ADL_LPM_EN_OFFSET,
942 	.lpm_residency_offset = ADL_LPM_RESIDENCY_OFFSET,
943 	.lpm_sts = adl_lpm_maps,
944 	.lpm_status_offset = ADL_LPM_STATUS_OFFSET,
945 	.lpm_live_status_offset = ADL_LPM_LIVE_STATUS_OFFSET,
946 };
947 
948 static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
949 {
950 	return readl(pmcdev->regbase + reg_offset);
951 }
952 
953 static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
954 				      u32 val)
955 {
956 	writel(val, pmcdev->regbase + reg_offset);
957 }
958 
959 static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
960 {
961 	/*
962 	 * The ADL PCH does not have the SLP_S0 counter, so the LPM residency
963 	 * counters, which use a 30.5 usec tick, are used as a workaround. All
964 	 * other client platforms have the legacy SLP_S0 residency counter,
965 	 * which uses a 122 usec tick.
966 	 */
967 	const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
968 
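	/*
	 * lpm_res_counter_step_x2 holds twice the LPM counter tick so that
	 * the 30.5 usec granularity can be handled with integer math;
	 * GET_X2_COUNTER() halves the doubled product again.
	 */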
969 	if (pmcdev->map == &adl_reg_map)
970 		return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
971 	else
972 		return (u64)value * pmcdev->map->slp_s0_res_counter_step;
973 }
974 
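/*
 * Setting the CF9 global reset bit in ETR3 is intended to turn the next reset
 * requested through I/O port 0xCF9 into a global platform reset. Once the
 * CF9LOCK bit is set the register can no longer be changed, hence -EACCES.
 */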
975 static int set_etr3(struct pmc_dev *pmcdev)
976 {
977 	const struct pmc_reg_map *map = pmcdev->map;
978 	u32 reg;
979 	int err;
980 
981 	if (!map->etr3_offset)
982 		return -EOPNOTSUPP;
983 
984 	mutex_lock(&pmcdev->lock);
985 
986 	/* check if CF9 is locked */
987 	reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
988 	if (reg & ETR3_CF9LOCK) {
989 		err = -EACCES;
990 		goto out_unlock;
991 	}
992 
993 	/* write CF9 global reset bit */
994 	reg |= ETR3_CF9GR;
995 	pmc_core_reg_write(pmcdev, map->etr3_offset, reg);
996 
997 	reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
998 	if (!(reg & ETR3_CF9GR)) {
999 		err = -EIO;
1000 		goto out_unlock;
1001 	}
1002 
1003 	err = 0;
1004 
1005 out_unlock:
1006 	mutex_unlock(&pmcdev->lock);
1007 	return err;
1008 }
1009 static umode_t etr3_is_visible(struct kobject *kobj,
1010 				struct attribute *attr,
1011 				int idx)
1012 {
1013 	struct device *dev = container_of(kobj, struct device, kobj);
1014 	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
1015 	const struct pmc_reg_map *map = pmcdev->map;
1016 	u32 reg;
1017 
1018 	mutex_lock(&pmcdev->lock);
1019 	reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
1020 	mutex_unlock(&pmcdev->lock);
1021 
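	/* If CF9 is locked the register cannot be changed, so expose etr3 read-only. */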
1022 	return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
1023 }
1024 
1025 static ssize_t etr3_show(struct device *dev,
1026 				 struct device_attribute *attr, char *buf)
1027 {
1028 	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
1029 	const struct pmc_reg_map *map = pmcdev->map;
1030 	u32 reg;
1031 
1032 	if (!map->etr3_offset)
1033 		return -EOPNOTSUPP;
1034 
1035 	mutex_lock(&pmcdev->lock);
1036 
1037 	reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
1038 	reg &= ETR3_CF9GR | ETR3_CF9LOCK;
1039 
1040 	mutex_unlock(&pmcdev->lock);
1041 
1042 	return sysfs_emit(buf, "0x%08x", reg);
1043 }
1044 
1045 static ssize_t etr3_store(struct device *dev,
1046 				  struct device_attribute *attr,
1047 				  const char *buf, size_t len)
1048 {
1049 	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
1050 	int err;
1051 	u32 reg;
1052 
1053 	err = kstrtouint(buf, 16, &reg);
1054 	if (err)
1055 		return err;
1056 
1057 	/* allow only CF9 writes */
1058 	if (reg != ETR3_CF9GR)
1059 		return -EINVAL;
1060 
1061 	err = set_etr3(pmcdev);
1062 	if (err)
1063 		return err;
1064 
1065 	return len;
1066 }
1067 static DEVICE_ATTR_RW(etr3);
1068 
1069 static struct attribute *pmc_attrs[] = {
1070 	&dev_attr_etr3.attr,
1071 	NULL
1072 };
1073 
1074 static const struct attribute_group pmc_attr_group = {
1075 	.attrs = pmc_attrs,
1076 	.is_visible = etr3_is_visible,
1077 };
1078 
1079 static const struct attribute_group *pmc_dev_groups[] = {
1080 	&pmc_attr_group,
1081 	NULL
1082 };
1083 
1084 static int pmc_core_dev_state_get(void *data, u64 *val)
1085 {
1086 	struct pmc_dev *pmcdev = data;
1087 	const struct pmc_reg_map *map = pmcdev->map;
1088 	u32 value;
1089 
1090 	value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
1091 	*val = pmc_core_adjust_slp_s0_step(pmcdev, value);
1092 
1093 	return 0;
1094 }
1095 
1096 DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
1097 
1098 static int pmc_core_check_read_lock_bit(struct pmc_dev *pmcdev)
1099 {
1100 	u32 value;
1101 
1102 	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
1103 	return value & BIT(pmcdev->map->pm_read_disable_bit);
1104 }
1105 
1106 static void pmc_core_slps0_display(struct pmc_dev *pmcdev, struct device *dev,
1107 				   struct seq_file *s)
1108 {
1109 	const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
1110 	const struct pmc_bit_map *map;
1111 	int offset = pmcdev->map->slps0_dbg_offset;
1112 	u32 data;
1113 
1114 	while (*maps) {
1115 		map = *maps;
1116 		data = pmc_core_reg_read(pmcdev, offset);
1117 		offset += 4;
1118 		while (map->name) {
1119 			if (dev)
1120 				dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
1121 					map->name,
1122 					data & map->bit_mask ? "Yes" : "No");
1123 			if (s)
1124 				seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
1125 					   map->name,
1126 					   data & map->bit_mask ? "Yes" : "No");
1127 			++map;
1128 		}
1129 		++maps;
1130 	}
1131 }
1132 
1133 static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
1134 {
1135 	int idx;
1136 
1137 	for (idx = 0; maps[idx]; idx++)
1138 		;/* Nothing */
1139 
1140 	return idx;
1141 }
1142 
1143 static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev,
1144 				 struct seq_file *s, u32 offset,
1145 				 const char *str,
1146 				 const struct pmc_bit_map **maps)
1147 {
1148 	int index, idx, len = 32, bit_mask, arr_size;
1149 	u32 *lpm_regs;
1150 
1151 	arr_size = pmc_core_lpm_get_arr_size(maps);
1152 	lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
1153 	if (!lpm_regs)
1154 		return;
1155 
1156 	for (index = 0; index < arr_size; index++) {
1157 		lpm_regs[index] = pmc_core_reg_read(pmcdev, offset);
1158 		offset += 4;
1159 	}
1160 
1161 	for (idx = 0; idx < arr_size; idx++) {
1162 		if (dev)
1163 			dev_info(dev, "\nLPM_%s_%d:\t0x%x\n", str, idx,
1164 				lpm_regs[idx]);
1165 		if (s)
1166 			seq_printf(s, "\nLPM_%s_%d:\t0x%x\n", str, idx,
1167 				   lpm_regs[idx]);
1168 		for (index = 0; maps[idx][index].name && index < len; index++) {
1169 			bit_mask = maps[idx][index].bit_mask;
1170 			if (dev)
1171 				dev_info(dev, "%-30s %-30d\n",
1172 					maps[idx][index].name,
1173 					lpm_regs[idx] & bit_mask ? 1 : 0);
1174 			if (s)
1175 				seq_printf(s, "%-30s %-30d\n",
1176 					   maps[idx][index].name,
1177 					   lpm_regs[idx] & bit_mask ? 1 : 0);
1178 		}
1179 	}
1180 
1181 	kfree(lpm_regs);
1182 }
1183 
1184 static bool slps0_dbg_latch;
1185 
1186 static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
1187 {
1188 	return readb(pmcdev->regbase + offset);
1189 }
1190 
1191 static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
1192 				 u8 pf_reg, const struct pmc_bit_map **pf_map)
1193 {
1194 	seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
1195 		   ip, pf_map[idx][index].name,
1196 		   pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
1197 }
1198 
1199 static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
1200 {
1201 	struct pmc_dev *pmcdev = s->private;
1202 	const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
1203 	u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
1204 	int index, iter, idx, ip = 0;
1205 
1206 	iter = pmcdev->map->ppfear0_offset;
1207 
1208 	for (index = 0; index < pmcdev->map->ppfear_buckets &&
1209 	     index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
1210 		pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
1211 
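	/*
	 * Each PPFEAR byte reports the PFET enable ack status of eight IPs,
	 * which is why the pfear bit maps repeat BIT(0)..BIT(7):
	 * pf_regs[index / 8] selects the byte covering IP 'index'.
	 */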
1212 	for (idx = 0; maps[idx]; idx++) {
1213 		for (index = 0; maps[idx][index].name &&
1214 		     index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
1215 			pmc_core_display_map(s, index, idx, ip,
1216 					     pf_regs[index / 8], maps);
1217 	}
1218 
1219 	return 0;
1220 }
1221 DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
1222 
1223 /* This function should return link status, 0 means ready */
1224 static int pmc_core_mtpmc_link_status(struct pmc_dev *pmcdev)
1225 {
1226 	u32 value;
1227 
1228 	value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
1229 	return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
1230 }
1231 
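/*
 * Post a message to the PMC through the MTPMC mailbox register. The write is
 * only performed once the MSG_FULL status bit reports that the previous
 * message has been consumed; callers then read the response from the MFPMC
 * register after an ~10 ms delay (see pmc_core_mphy_pg_show() and
 * pmc_core_pll_show()).
 */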
1232 static int pmc_core_send_msg(struct pmc_dev *pmcdev, u32 *addr_xram)
1233 {
1234 	u32 dest;
1235 	int timeout;
1236 
1237 	for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
1238 		if (pmc_core_mtpmc_link_status(pmcdev) == 0)
1239 			break;
1240 		msleep(5);
1241 	}
1242 
1243 	if (timeout <= 0 && pmc_core_mtpmc_link_status(pmcdev))
1244 		return -EBUSY;
1245 
1246 	dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
1247 	pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
1248 	return 0;
1249 }
1250 
1251 static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
1252 {
1253 	struct pmc_dev *pmcdev = s->private;
1254 	const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
1255 	u32 mphy_core_reg_low, mphy_core_reg_high;
1256 	u32 val_low, val_high;
1257 	int index, err = 0;
1258 
1259 	if (pmcdev->pmc_xram_read_bit) {
1260 		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
1261 		return 0;
1262 	}
1263 
1264 	mphy_core_reg_low  = (SPT_PMC_MPHY_CORE_STS_0 << 16);
1265 	mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
1266 
1267 	mutex_lock(&pmcdev->lock);
1268 
1269 	if (pmc_core_send_msg(pmcdev, &mphy_core_reg_low) != 0) {
1270 		err = -EBUSY;
1271 		goto out_unlock;
1272 	}
1273 
1274 	msleep(10);
1275 	val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
1276 
1277 	if (pmc_core_send_msg(pmcdev, &mphy_core_reg_high) != 0) {
1278 		err = -EBUSY;
1279 		goto out_unlock;
1280 	}
1281 
1282 	msleep(10);
1283 	val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
1284 
1285 	for (index = 0; index < 8 && map[index].name; index++) {
1286 		seq_printf(s, "%-32s\tState: %s\n",
1287 			   map[index].name,
1288 			   map[index].bit_mask & val_low ? "Not power gated" :
1289 			   "Power gated");
1290 	}
1291 
1292 	for (index = 8; map[index].name; index++) {
1293 		seq_printf(s, "%-32s\tState: %s\n",
1294 			   map[index].name,
1295 			   map[index].bit_mask & val_high ? "Not power gated" :
1296 			   "Power gated");
1297 	}
1298 
1299 out_unlock:
1300 	mutex_unlock(&pmcdev->lock);
1301 	return err;
1302 }
1303 DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
1304 
1305 static int pmc_core_pll_show(struct seq_file *s, void *unused)
1306 {
1307 	struct pmc_dev *pmcdev = s->private;
1308 	const struct pmc_bit_map *map = pmcdev->map->pll_sts;
1309 	u32 mphy_common_reg, val;
1310 	int index, err = 0;
1311 
1312 	if (pmcdev->pmc_xram_read_bit) {
1313 		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
1314 		return 0;
1315 	}
1316 
1317 	mphy_common_reg  = (SPT_PMC_MPHY_COM_STS_0 << 16);
1318 	mutex_lock(&pmcdev->lock);
1319 
1320 	if (pmc_core_send_msg(pmcdev, &mphy_common_reg) != 0) {
1321 		err = -EBUSY;
1322 		goto out_unlock;
1323 	}
1324 
1325 	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
1326 	msleep(10);
1327 	val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
1328 
1329 	for (index = 0; map[index].name ; index++) {
1330 		seq_printf(s, "%-32s\tState: %s\n",
1331 			   map[index].name,
1332 			   map[index].bit_mask & val ? "Active" : "Idle");
1333 	}
1334 
1335 out_unlock:
1336 	mutex_unlock(&pmcdev->lock);
1337 	return err;
1338 }
1339 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
1340 
1341 static int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
1342 {
1343 	const struct pmc_reg_map *map = pmcdev->map;
1344 	u32 reg;
1345 	int err = 0;
1346 
1347 	mutex_lock(&pmcdev->lock);
1348 
1349 	if (value > map->ltr_ignore_max) {
1350 		err = -EINVAL;
1351 		goto out_unlock;
1352 	}
1353 
1354 	reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
1355 	reg |= BIT(value);
1356 	pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg);
1357 
1358 out_unlock:
1359 	mutex_unlock(&pmcdev->lock);
1360 
1361 	return err;
1362 }
1363 
1364 static ssize_t pmc_core_ltr_ignore_write(struct file *file,
1365 					 const char __user *userbuf,
1366 					 size_t count, loff_t *ppos)
1367 {
1368 	struct seq_file *s = file->private_data;
1369 	struct pmc_dev *pmcdev = s->private;
1370 	u32 buf_size, value;
1371 	int err;
1372 
1373 	buf_size = min_t(u32, count, 64);
1374 
1375 	err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
1376 	if (err)
1377 		return err;
1378 
1379 	err = pmc_core_send_ltr_ignore(pmcdev, value);
1380 
1381 	return err == 0 ? count : err;
1382 }
1383 
1384 static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
1385 {
1386 	return 0;
1387 }
1388 
1389 static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
1390 {
1391 	return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
1392 }
1393 
1394 static const struct file_operations pmc_core_ltr_ignore_ops = {
1395 	.open           = pmc_core_ltr_ignore_open,
1396 	.read           = seq_read,
1397 	.write          = pmc_core_ltr_ignore_write,
1398 	.llseek         = seq_lseek,
1399 	.release        = single_release,
1400 };
1401 
1402 static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
1403 {
1404 	const struct pmc_reg_map *map = pmcdev->map;
1405 	u32 fd;
1406 
1407 	mutex_lock(&pmcdev->lock);
1408 
1409 	if (!reset && !slps0_dbg_latch)
1410 		goto out_unlock;
1411 
1412 	fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset);
1413 	if (reset)
1414 		fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
1415 	else
1416 		fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
1417 	pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd);
1418 
1419 	slps0_dbg_latch = false;
1420 
1421 out_unlock:
1422 	mutex_unlock(&pmcdev->lock);
1423 }
1424 
1425 static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
1426 {
1427 	struct pmc_dev *pmcdev = s->private;
1428 
1429 	pmc_core_slps0_dbg_latch(pmcdev, false);
1430 	pmc_core_slps0_display(pmcdev, NULL, s);
1431 	pmc_core_slps0_dbg_latch(pmcdev, true);
1432 
1433 	return 0;
1434 }
1435 DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
1436 
1437 static u32 convert_ltr_scale(u32 val)
1438 {
1439 	/*
1440 	 * As per PCIE specification supporting document
1441 	 * ECN_LatencyTolnReporting_14Aug08.pdf the Latency
1442 	 * Tolerance Reporting data payload is encoded in
1443 	 * 3-bit scale and 10-bit value fields. Values are
1444 	 * multiplied by the indicated scale to yield an absolute time
1445 	 * value, expressible in a range from 1 nanosecond to
1446 	 * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
1447 	 *
1448 	 * scale encoding is as follows:
1449 	 *
1450 	 * ----------------------------------------------
1451 	 * |scale factor	|	Multiplier (ns)	|
1452 	 * ----------------------------------------------
1453 	 * |	0		|	1		|
1454 	 * |	1		|	32		|
1455 	 * |	2		|	1024		|
1456 	 * |	3		|	32768		|
1457 	 * |	4		|	1048576		|
1458 	 * |	5		|	33554432	|
1459 	 * |	6		|	Invalid		|
1460 	 * |	7		|	Invalid		|
1461 	 * ----------------------------------------------
1462 	 */
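	/*
	 * Each scale step multiplies the value by 32 (2^5), so the multiplier
	 * is simply 1 << (5 * val), e.g. val = 2 gives 1 << 10 = 1024 ns.
	 */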
1463 	if (val > 5) {
1464 		pr_warn("Invalid LTR scale factor.\n");
1465 		return 0;
1466 	}
1467 
1468 	return 1U << (5 * val);
1469 }
1470 
1471 static int pmc_core_ltr_show(struct seq_file *s, void *unused)
1472 {
1473 	struct pmc_dev *pmcdev = s->private;
1474 	const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts;
1475 	u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
1476 	u32 ltr_raw_data, scale, val;
1477 	u16 snoop_ltr, nonsnoop_ltr;
1478 	int index;
1479 
1480 	for (index = 0; map[index].name ; index++) {
1481 		decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
1482 		ltr_raw_data = pmc_core_reg_read(pmcdev,
1483 						 map[index].bit_mask);
1484 		snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
1485 		nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;
1486 
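		/*
		 * The low 16 bits of the register hold the snoop LTR and the
		 * high 16 bits the non-snoop LTR; each half carries a
		 * requirement flag plus the 10-bit value and 3-bit scale
		 * fields decoded below.
		 */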
1487 		if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
1488 			scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
1489 			val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
1490 			decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
1491 		}
1492 
1493 		if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
1494 			scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
1495 			val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
1496 			decoded_snoop_ltr = val * convert_ltr_scale(scale);
1497 		}
1498 
1499 		seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
1500 			   map[index].name, ltr_raw_data,
1501 			   decoded_non_snoop_ltr,
1502 			   decoded_snoop_ltr);
1503 	}
1504 	return 0;
1505 }
1506 DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
1507 
1508 static inline u64 adjust_lpm_residency(struct pmc_dev *pmcdev, u32 offset,
1509 				       const int lpm_adj_x2)
1510 {
1511 	u64 lpm_res = pmc_core_reg_read(pmcdev, offset);
1512 
1513 	return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
1514 }
1515 
1516 static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
1517 {
1518 	struct pmc_dev *pmcdev = s->private;
1519 	const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
1520 	u32 offset = pmcdev->map->lpm_residency_offset;
1521 	int i, mode;
1522 
1523 	seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
1524 
1525 	pmc_for_each_mode(i, mode, pmcdev) {
1526 		seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
1527 			   adjust_lpm_residency(pmcdev, offset + (4 * mode), lpm_adj_x2));
1528 	}
1529 
1530 	return 0;
1531 }
1532 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
1533 
1534 static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
1535 {
1536 	struct pmc_dev *pmcdev = s->private;
1537 	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
1538 	u32 offset = pmcdev->map->lpm_status_offset;
1539 
1540 	pmc_core_lpm_display(pmcdev, NULL, s, offset, "STATUS", maps);
1541 
1542 	return 0;
1543 }
1544 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
1545 
1546 static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
1547 {
1548 	struct pmc_dev *pmcdev = s->private;
1549 	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
1550 	u32 offset = pmcdev->map->lpm_live_status_offset;
1551 
1552 	pmc_core_lpm_display(pmcdev, NULL, s, offset, "LIVE_STATUS", maps);
1553 
1554 	return 0;
1555 }
1556 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
1557 
1558 static void pmc_core_substate_req_header_show(struct seq_file *s)
1559 {
1560 	struct pmc_dev *pmcdev = s->private;
1561 	int i, mode;
1562 
1563 	seq_printf(s, "%30s |", "Element");
1564 	pmc_for_each_mode(i, mode, pmcdev)
1565 		seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
1566 
1567 	seq_printf(s, " %9s |\n", "Status");
1568 }
1569 
1570 static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
1571 {
1572 	struct pmc_dev *pmcdev = s->private;
1573 	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
1574 	const struct pmc_bit_map *map;
1575 	const int num_maps = pmcdev->map->lpm_num_maps;
1576 	u32 sts_offset = pmcdev->map->lpm_status_offset;
1577 	u32 *lpm_req_regs = pmcdev->lpm_req_regs;
1578 	int mp;
1579 
1580 	/* Display the header */
1581 	pmc_core_substate_req_header_show(s);
1582 
1583 	/* Loop over maps */
1584 	for (mp = 0; mp < num_maps; mp++) {
1585 		u32 req_mask = 0;
1586 		u32 lpm_status;
1587 		int mode, idx, i, len = 32;
1588 
1589 		/*
1590 		 * Capture the requirements and create a mask so that we only
1591 		 * show an element if it's required for at least one of the
1592 		 * enabled low power modes
1593 		 */
1594 		pmc_for_each_mode(idx, mode, pmcdev)
1595 			req_mask |= lpm_req_regs[mp + (mode * num_maps)];
1596 
1597 		/* Get the last latched status for this map */
1598 		lpm_status = pmc_core_reg_read(pmcdev, sts_offset + (mp * 4));
1599 
1600 		/*  Loop over elements in this map */
1601 		map = maps[mp];
1602 		for (i = 0; map[i].name && i < len; i++) {
1603 			u32 bit_mask = map[i].bit_mask;
1604 
1605 			if (!(bit_mask & req_mask))
1606 				/*
1607 				 * Not required for any enabled states
1608 				 * so don't display
1609 				 */
1610 				continue;
1611 
1612 			/* Display the element name in the first column */
1613 			seq_printf(s, "%30s |", map[i].name);
1614 
1615 			/* Loop over the enabled states and display if required */
1616 			pmc_for_each_mode(idx, mode, pmcdev) {
1617 				if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
1618 					seq_printf(s, " %9s |",
1619 						   "Required");
1620 				else
1621 					seq_printf(s, " %9s |", " ");
1622 			}
1623 
1624 			/* In Status column, show the last captured state of this agent */
1625 			if (lpm_status & bit_mask)
1626 				seq_printf(s, " %9s |", "Yes");
1627 			else
1628 				seq_printf(s, " %9s |", " ");
1629 
1630 			seq_puts(s, "\n");
1631 		}
1632 	}
1633 
1634 	return 0;
1635 }
1636 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
1637 
1638 static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
1639 {
1640 	struct pmc_dev *pmcdev = s->private;
1641 	bool c10;
1642 	u32 reg;
1643 	int idx, mode;
1644 
1645 	reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
1646 	if (reg & LPM_STS_LATCH_MODE) {
1647 		seq_puts(s, "c10");
1648 		c10 = false;
1649 	} else {
1650 		seq_puts(s, "[c10]");
1651 		c10 = true;
1652 	}
1653 
1654 	pmc_for_each_mode(idx, mode, pmcdev) {
1655 		if ((BIT(mode) & reg) && !c10)
1656 			seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
1657 		else
1658 			seq_printf(s, " %s", pmc_lpm_modes[mode]);
1659 	}
1660 
1661 	seq_puts(s, " clear\n");
1662 
1663 	return 0;
1664 }
1665 
1666 static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
1667 					     const char __user *userbuf,
1668 					     size_t count, loff_t *ppos)
1669 {
1670 	struct seq_file *s = file->private_data;
1671 	struct pmc_dev *pmcdev = s->private;
1672 	bool clear = false, c10 = false;
1673 	unsigned char buf[8];
1674 	int idx, m, mode;
1675 	u32 reg;
1676 
1677 	if (count > sizeof(buf) - 1)
1678 		return -EINVAL;
1679 	if (copy_from_user(buf, userbuf, count))
1680 		return -EFAULT;
1681 	buf[count] = '\0';
1682 
1683 	/*
1684 	 * Allowed strings are:
1685 	 *	Any enabled substate, e.g. 'S0i2.0'
1686 	 *	'c10'
1687 	 *	'clear'
1688 	 */
1689 	mode = sysfs_match_string(pmc_lpm_modes, buf);
1690 
1691 	/* Check string matches enabled mode */
1692 	pmc_for_each_mode(idx, m, pmcdev)
1693 		if (mode == m)
1694 			break;
1695 
1696 	if (mode != m || mode < 0) {
1697 		if (sysfs_streq(buf, "clear"))
1698 			clear = true;
1699 		else if (sysfs_streq(buf, "c10"))
1700 			c10 = true;
1701 		else
1702 			return -EINVAL;
1703 	}
1704 
1705 	if (clear) {
		mutex_lock(&pmcdev->lock);

		reg = pmc_core_reg_read(pmcdev, pmcdev->map->etr3_offset);
		reg |= ETR3_CLEAR_LPM_EVENTS;
		pmc_core_reg_write(pmcdev, pmcdev->map->etr3_offset, reg);

		mutex_unlock(&pmcdev->lock);

		return count;
	}

	if (c10) {
		mutex_lock(&pmcdev->lock);

		reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
		reg &= ~LPM_STS_LATCH_MODE;
		pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);

		mutex_unlock(&pmcdev->lock);

		return count;
	}

	/*
	 * For LPM mode latching we set the latch enable bit and selected mode
	 * and clear everything else.
	 */
	reg = LPM_STS_LATCH_MODE | BIT(mode);
	mutex_lock(&pmcdev->lock);
	pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
	mutex_unlock(&pmcdev->lock);

	return count;
}
DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);

static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->msr_sts;
	u64 pcstate_count;
	int index;

	for (index = 0; map[index].name; index++) {
		if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
			continue;

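		/*
		 * The conversion below assumes the residency MSRs tick at the
		 * TSC rate: counts * 1000 / tsc_khz yields microseconds.
		 */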
		pcstate_count *= 1000;
		do_div(pcstate_count, tsc_khz);
		seq_printf(s, "%-8s : %llu\n", map[index].name,
			   pcstate_count);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);

static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
{
	int i, j;

	if (!lpm_pri)
		return false;
	/*
	 * Each byte contains the priority level for 2 modes (7:4 and 3:0).
	 * In a 32 bit register this allows for describing 8 modes. Store the
	 * levels and look for values out of range.
	 */
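	/*
	 * Worked example with an illustrative (not hardware-sourced) value:
	 * lpm_pri = 0x76543210 places level 0 in bits 3:0, level 1 in bits
	 * 7:4, and so on, i.e. mode 0 gets priority level 0, mode 1 level 1,
	 * ..., mode 7 level 7. All eight levels are unique and below
	 * LPM_MAX_NUM_MODES, so this value would be accepted.
	 */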
	for (i = 0; i < 8; i++) {
		int level = lpm_pri & GENMASK(3, 0);

		if (level >= LPM_MAX_NUM_MODES)
			return false;

		mode_order[i] = level;
		lpm_pri >>= 4;
	}

	/* Check that we have unique values */
	for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
		for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
			if (mode_order[i] == mode_order[j])
				return false;

	return true;
}

static void pmc_core_get_low_power_modes(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
	u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
	u8 mode_order[LPM_MAX_NUM_MODES];
	u32 lpm_pri;
	u32 lpm_en;
	int mode, i, p;

	/* Use LPM Maps to indicate support for substates */
	if (!pmcdev->map->lpm_num_maps)
		return;

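	/* Each bit set in LPM_EN corresponds to one enabled substate */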
	lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
	pmcdev->num_lpm_modes = hweight32(lpm_en);

	/* Read 32 bit LPM_PRI register */
	lpm_pri = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_priority_offset);

	/*
	 * If the lpm_pri value passes verification, override the default
	 * mode order here. Otherwise stick with the default.
	 */
	if (pmc_core_pri_verify(lpm_pri, mode_order))
		/* Get list of modes in priority order */
		for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
			pri_order[mode_order[mode]] = mode;
	else
		dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n");

	/*
	 * Loop through all modes from lowest to highest priority,
	 * and capture all enabled modes in order
	 */
	i = 0;
	for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
		int mode = pri_order[p];

		if (!(BIT(mode) & lpm_en))
			continue;

		pmcdev->lpm_en_modes[i++] = mode;
	}
}

static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
	debugfs_remove_recursive(pmcdev->dbgfs_dir);
}

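/*
 * All files below live under the "pmc_core" debugfs directory, i.e.
 * /sys/kernel/debug/pmc_core/ with the usual debugfs mount point.
 */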
static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
{
	struct dentry *dir;

	dir = debugfs_create_dir("pmc_core", NULL);
	pmcdev->dbgfs_dir = dir;

	debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
			    &pmc_core_dev_state);

	if (pmcdev->map->pfear_sts)
		debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
				    pmcdev, &pmc_core_ppfear_fops);

	debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
			    &pmc_core_ltr_ignore_ops);

	debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);

	debugfs_create_file("package_cstate_show", 0444, dir, pmcdev,
			    &pmc_core_pkgc_fops);

	if (pmcdev->map->pll_sts)
		debugfs_create_file("pll_status", 0444, dir, pmcdev,
				    &pmc_core_pll_fops);

	if (pmcdev->map->mphy_sts)
		debugfs_create_file("mphy_core_lanes_power_gating_status",
				    0444, dir, pmcdev,
				    &pmc_core_mphy_pg_fops);

	if (pmcdev->map->slps0_dbg_maps) {
		debugfs_create_file("slp_s0_debug_status", 0444,
				    dir, pmcdev,
				    &pmc_core_slps0_dbg_fops);

		debugfs_create_bool("slp_s0_dbg_latch", 0644,
				    dir, &slps0_dbg_latch);
	}

	if (pmcdev->map->lpm_en_offset) {
		debugfs_create_file("substate_residencies", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_res_fops);
	}

	if (pmcdev->map->lpm_status_offset) {
		debugfs_create_file("substate_status_registers", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_sts_regs_fops);
		debugfs_create_file("substate_live_status_registers", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_l_sts_regs_fops);
		debugfs_create_file("lpm_latch_mode", 0644,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_lpm_latch_mode_fops);
	}

	if (pmcdev->lpm_req_regs) {
		debugfs_create_file("substate_requirements", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_req_regs_fops);
	}
}

static const struct x86_cpu_id intel_pmc_core_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&spt_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&spt_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&spt_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&spt_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	&cnp_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI,	&icl_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&cnp_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&cnp_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&tgl_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&tgl_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	&tgl_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	&icl_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		&tgl_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&tgl_reg_map),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&adl_reg_map),
	{}
};

MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);

static const struct pci_device_id pmc_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
	{ }
};

/*
 * This quirk can be used on platforms where the BIOS forces the 24 MHz
 * crystal to shut down before the PMC can assert SLP_S0#.
 */
static bool xtal_ignore;
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
	xtal_ignore = true;
	return 0;
}

static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
{
	u32 value;

	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
	/* 24MHz Crystal Shutdown Qualification Disable */
	value |= SPT_PMC_VRIC1_XTALSDQDIS;
	/* Low Voltage Mode Enable */
	value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
	pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
}

static const struct dmi_system_id pmc_core_dmi_table[] = {
	{
	.callback = quirk_xtal_ignore,
	.ident = "HP Elite x2 1013 G3",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
		},
	},
	{}
};

static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
{
	dmi_check_system(pmc_core_dmi_table);

	if (xtal_ignore)
		pmc_core_xtal_ignore(pmcdev);
}

static int pmc_core_probe(struct platform_device *pdev)
{
	static bool device_initialized;
	struct pmc_dev *pmcdev;
	const struct x86_cpu_id *cpu_id;
	u64 slp_s0_addr;

	if (device_initialized)
		return -ENODEV;

	pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
	if (!pmcdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, pmcdev);

	cpu_id = x86_match_cpu(intel_pmc_core_ids);
	if (!cpu_id)
		return -ENODEV;

	pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;

	/*
	 * Coffee Lake has the Kaby Lake CPU ID but a Cannon Lake PCH, so the
	 * Sunrise Point PCH regmap can't be used here. Use the Cannon Lake
	 * PCH regmap in that case.
	 */
	if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
		pmcdev->map = &cnp_reg_map;

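	/*
	 * The ACPI LPIT provides the address of the SLP_S0 residency counter;
	 * subtracting this map's counter offset gives the PMC MMIO base. If
	 * that address is not available, fall back to the default base below.
	 */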
	if (lpit_read_residency_count_address(&slp_s0_addr)) {
		pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;

		if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
			return -ENODEV;
	} else {
		pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
	}

	pmcdev->regbase = ioremap(pmcdev->base_addr,
				  pmcdev->map->regmap_length);
	if (!pmcdev->regbase)
		return -ENOMEM;

	mutex_init(&pmcdev->lock);

	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(pmcdev);
	pmc_core_get_low_power_modes(pdev);
	pmc_core_do_dmi_quirks(pmcdev);

	if (pmcdev->map == &tgl_reg_map)
		pmc_core_get_tgl_lpm_reqs(pdev);

	/*
	 * On TGL and ADL, due to a hardware limitation, the GBE LTR blocks PC10
	 * when a cable is attached. Tell the PMC to ignore it.
	 */
	if (pmcdev->map == &tgl_reg_map || pmcdev->map == &adl_reg_map) {
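		/* Index 3 is assumed to be the GBE entry in this platform's LTR map */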
		dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
		pmc_core_send_ltr_ignore(pmcdev, 3);
	}

	pmc_core_dbgfs_register(pmcdev);

	device_initialized = true;
	dev_info(&pdev->dev, " initialized\n");

	return 0;
}

static int pmc_core_remove(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);

	pmc_core_dbgfs_unregister(pmcdev);
	platform_set_drvdata(pdev, NULL);
	mutex_destroy(&pmcdev->lock);
	iounmap(pmcdev->regbase);
	return 0;
}

static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");

static __maybe_unused int pmc_core_suspend(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);

	pmcdev->check_counters = false;

	/* No warnings on S0ix failures */
	if (!warn_on_s0ix_failures)
		return 0;

	/* Check if the suspend will actually use S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	/* Save PC10 residency for checking later */
	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
		return -EIO;

	/* Save S0ix residency for checking later */
	if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
		return -EIO;

	pmcdev->check_counters = true;
	return 0;
}

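/*
 * If a residency counter did not advance across the suspend/resume cycle,
 * the corresponding state was never entered.
 */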
static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
{
	u64 pc10_counter;

	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
		return false;

	if (pc10_counter == pmcdev->pc10_counter)
		return true;

	return false;
}

static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
{
	u64 s0ix_counter;

	if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
		return false;

	if (s0ix_counter == pmcdev->s0ix_counter)
		return true;

	return false;
}

static __maybe_unused int pmc_core_resume(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
	int offset = pmcdev->map->lpm_status_offset;

	if (!pmcdev->check_counters)
		return 0;

	if (!pmc_core_is_s0ix_failed(pmcdev))
		return 0;

	if (pmc_core_is_pc10_failed(pmcdev)) {
		/* S0ix failed because of PC10 entry failure */
		dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
			 pmcdev->pc10_counter);
		return 0;
	}

	/* The really interesting case: S0ix failed, so let's ask the PMC why. */
	dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
		 pmcdev->s0ix_counter);
	if (pmcdev->map->slps0_dbg_maps)
		pmc_core_slps0_display(pmcdev, dev, NULL);
	if (pmcdev->map->lpm_sts)
		pmc_core_lpm_display(pmcdev, dev, NULL, offset, "STATUS", maps);

	return 0;
}

static const struct dev_pm_ops pmc_core_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};

static const struct acpi_device_id pmc_core_acpi_ids[] = {
	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
	{ }
};
MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);

static struct platform_driver pmc_core_driver = {
	.driver = {
		.name = "intel_pmc_core",
		.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
		.pm = &pmc_core_pm_ops,
		.dev_groups = pmc_dev_groups,
	},
	.probe = pmc_core_probe,
	.remove = pmc_core_remove,
};

module_platform_driver(pmc_core_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");