/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per-CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers, which may be memory mapped or hardware registers, and may
 * also include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (it can provide min and max bounds).
 *
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to the OS.
 *
 * The communication between the OS and the platform occurs through another
 * medium called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC specs are available in the ACPI v5.1
 * and later specifications.
 */
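
/*
 * For illustration only: a minimal sketch of how a (hypothetical) cpufreq
 * backend might drive the interfaces exported below. Error handling and
 * frequency<->perf conversion are elided.
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	cppc_get_perf_caps(cpu, &caps);		// discover the perf range
 *	ctrls.desired_perf = caps.nominal_perf;	// pick a level within bounds
 *	cppc_set_perf(cpu, &ctrls);		// convey the request (via PCC)
 */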

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>

#include <acpi/cppc_acpi.h>

/*
 * Lock to provide mutually exclusive access to the PCC
 * channel. e.g. when the remote updates the shared region
 * with new data, the reader needs to be protected from
 * other CPUs' activity on the same channel.
 */
static DEFINE_SPINLOCK(pcc_lock);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per-CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* This layer handles all the PCC specifics for CPPC. */
static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static u64 comm_base_addr;
static int pcc_subspace_idx = -1;
static u16 pcc_cmd_delay;
static bool pcc_channel_acquired;

/*
 * Arbitrary number of retries in case the remote processor is slow to
 * respond to PCC commands.
 */
#define NUM_RETRIES 500

static int send_pcc_cmd(u16 cmd)
{
	int retries, result = -EIO;
	struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
	u32 cmd_latency = pcct_ss->latency;

	/* Min time OS should wait before sending next command. */
	udelay(pcc_cmd_delay);

	/* Write to the shared comm region. */
	writew(cmd, &generic_comm_base->command);

	/* Clear the CMD COMPLETE bit; the platform sets it when done. */
	writew(0, &generic_comm_base->status);

	/* Ring doorbell */
	result = mbox_send_message(pcc_channel, &cmd);
	if (result < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, result);
		return result;
	}

	/* Wait for a nominal time to let platform process command. */
	udelay(cmd_latency);

	/*
	 * Assume failure until the platform signals command completion;
	 * mbox_send_message() may have returned a non-negative queue index.
	 */
	result = -EIO;

	/* Retry in case the remote processor was too slow to catch up. */
	for (retries = NUM_RETRIES; retries > 0; retries--) {
		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
			result = 0;
			break;
		}
	}

	mbox_client_txdone(pcc_channel, result);
	return result;
}
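
/*
 * For reference, the generic PCC shared memory region that send_pcc_cmd()
 * pokes begins with an 8-byte header (per the ACPI 5.0+ PCCT definition):
 *
 *	struct acpi_pcct_shared_memory {
 *		u32 signature;	// "PCC" signature ORed with the subspace ID
 *		u16 command;	// command code (CMD_READ/CMD_WRITE here)
 *		u16 status;	// bit 0 is the command-complete bit
 *	};
 *
 * The subspace's communication space follows this header, which is why
 * get_phys_addr() below adds a 0x8 byte offset for PCC registers.
 */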

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

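/*
 * Note: with .knows_txdone set, the mailbox framework expects this client
 * to detect command completion on its own (we poll the status word in
 * send_pcc_cmd()) and to report it via mbox_client_txdone().
 */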
struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}
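
/*
 * For reference, a _PSD object as parsed above looks roughly like the
 * following (values illustrative):
 *
 *	Name (_PSD, Package() {
 *		Package() {
 *			5,	// NumEntries
 *			0,	// Revision
 *			0,	// Domain number
 *			0xFD,	// Coordination type (SW_ANY)
 *			4	// Number of processors in the domain
 *		}
 *	})
 */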

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to per-CPU CPPC data, including PSD info.
 *
 *	Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain. */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info. */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
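
/*
 * For example (illustrative topology): if CPUs 0 and 1 report the same
 * _PSD domain with coordination type 0xFD (SW_ANY), both end up with
 * shared_type == CPUFREQ_SHARED_TYPE_ANY and shared_cpu_map == {0, 1}.
 */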

static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	unsigned int len;

	if (pcc_subspace_idx >= 0) {
		pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
				pcc_subspace_idx);

		if (IS_ERR(pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = pcc_channel->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * This is the shared communication region
		 * for the OS and the platform to communicate over.
		 */
		comm_base_addr = cppc_ss->base_address;
		len = cppc_ss->length;
		pcc_cmd_delay = cppc_ss->min_turnaround_time;

		pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
		if (!pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_channel_acquired = true;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package() {
 *		17,						// NumEntries
 *		1,						// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		..
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9	// AccessSize (subspace ID)
 *	)
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 *	Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	/* Second entry should be the revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through the remaining entries in _CPC. */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_idx < 0)
					pcc_subspace_idx = gas_t->access_width;
				else if (pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				/* Support only PCC and SYS MEM type regs. */
				pr_debug("Unsupported register type: %d\n", gas_t->space_id);
				goto out_free;
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}

	/* Store the CPU's logical ID. */
	cpc_ptr->cpu_id = pr->id;

	/* Parse the PSD data for this CPU. */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register the PCC channel once for all CPUs. */
	if (!pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_idx);
		if (ret)
			goto out_free;
	}

	/* Plug the PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	/* Everything looks okay. */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	kfree(output.pointer);
	return 0;

out_free:
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

static u64 get_phys_addr(struct cpc_reg *reg)
{
	/* PCC communication addr space begins at byte offset 0x8. */
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		return (u64)comm_base_addr + 0x8 + reg->address;
	else
		return reg->address;
}
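
/*
 * e.g. with the sample _CPC above, the Desired Performance register at
 * PCC offset 0x110 resolves to comm_base_addr + 0x8 + 0x110, skipping
 * the 8-byte shared memory header (signature/command/status).
 */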

static void cpc_read(struct cpc_reg *reg, u64 *val)
{
	u64 addr = get_phys_addr(reg);

	acpi_os_read_memory((acpi_physical_address)addr,
			val, reg->bit_width);
}

static void cpc_write(struct cpc_reg *reg, u64 val)
{
	u64 addr = get_phys_addr(reg);

	acpi_os_write_memory((acpi_physical_address)addr,
			val, reg->bit_width);
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated, else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
								 *nom_perf;
	u64 high, low, ref, nom;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update the PCC subspace. */
		if (send_pcc_cmd(CMD_READ)) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&highest_reg->cpc_entry.reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(&lowest_reg->cpc_entry.reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(&ref_perf->cpc_entry.reg, &ref);
	perf_caps->reference_perf = ref;

	cpc_read(&nom_perf->cpc_entry.reg, &nom);
	perf_caps->nominal_perf = nom;

	/* A missing reference perf defaults to the nominal perf level. */
	if (!ref)
		perf_caps->reference_perf = perf_caps->nominal_perf;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated, else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg;
	u64 delivered, reference;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update the PCC subspace. */
		if (send_pcc_cmd(CMD_READ)) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&delivered_reg->cpc_entry.reg, &delivered);
	cpc_read(&reference_reg->cpc_entry.reg, &reference);

	if (!delivered || !reference) {
		ret = -EFAULT;
		goto out_err;
	}

	/* Report deltas since the last call and save the raw counts. */
	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;

	perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered;
	perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference;

	perf_fb_ctrs->prev_delivered = delivered;
	perf_fb_ctrs->prev_reference = reference;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
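
/*
 * A caller can estimate the average delivered performance over the sampling
 * interval from the counter deltas returned above, e.g. (sketch only;
 * reference_perf comes from cppc_get_perf_caps()):
 *
 *	delivered_perf = caps.reference_perf *
 *			 fb_ctrs.delivered / fb_ctrs.reference;
 */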

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	spin_lock(&pcc_lock);

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf);

	/* Is this a PCC reg? */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		/* Ring doorbell so the platform can pick up our perf request. */
		if (send_pcc_cmd(CMD_WRITE))
			ret = -EIO;
	}

	spin_unlock(&pcc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
742