// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize request within requested bounds
 *   depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
 * above specifications.
 */

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical use cases (currently cppc_set_perf):
	 *	We need to take read_lock and check if the channel belongs to
	 * OSPM before reading or writing to the PCC subspace.
	 *	We need to take write_lock before transferring the channel
	 * ownership to the platform via a doorbell.
	 *	This allows us to batch a number of CPPC requests if they happen
	 * to originate at about the same time.
	 *
	 * For non-performance-critical use cases (init):
	 *	Take write_lock for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/*
 * pcc mapped address + header size (8 bytes: signature + command + status)
 * + offset within PCC subspace
 */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
						0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
				struct kobj_attribute *attr, char *buf)	\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

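/*
 * For example, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf)
 * below expands to a show_highest_perf() sysfs handler that fills a local
 * struct cppc_perf_caps via cppc_get_perf_caps() and prints its highest_perf
 * member, plus the matching read-only kobj_attribute named highest_perf.
 */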
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);
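
/*
 * These attributes surface under each CPU's acpi_cppc kobject (see
 * kobject_init_and_add() in acpi_cppc_processor_probe()). E.g., assuming
 * CPU0 exposes _CPC, reading the feedback counters might look like:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/acpi_cppc/feedback_ctrs
 *	ref:103609 del:104048
 *
 * (the values shown are illustrative only)
 */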

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll the PCC status register every 3us (delay_us) for a maximum of
	 * deadline_us (timeout_us) until the PCC command-complete bit is set
	 * (cond).
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					status & PCC_CMD_COMPLETE_MASK, 3,
					pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock (pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds."
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec and
	 * just not send the request to the platform after hitting the MPAR limit
	 * in any 60s window.
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* Wait for completion and check for the PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pr = all_cpu_data[i];
		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_cpu(j, pr->shared_cpu_map) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}
	goto out;

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];

		/* Assume no coordination on any error parsing domain info */
		cpumask_clear(pr->shared_cpu_map);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	}
out:
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
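
/*
 * Example: if firmware reported a SW_ALL _PSD domain spanning CPUs 0-3,
 * each of those CPUs would end up with shared_type CPUFREQ_SHARED_TYPE_ALL
 * and a shared_cpu_map covering CPUs 0-3, so cpufreq treats them as a
 * single frequency domain. (Illustrative topology, not from this file.)
 */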

static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for %d CPPC\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/* Check if CPPC revision + num_ent combination is supported */
static bool is_cppc_supported(int revision, int num_ent)
{
	int expected_num_ent;

	switch (revision) {
	case CPPC_V2_REV:
		expected_num_ent = CPPC_V2_NUM_ENT;
		break;
	case CPPC_V3_REV:
		expected_num_ent = CPPC_V3_NUM_ENT;
		break;
	default:
		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
			revision);
		return false;
	}

	if (expected_num_ent != num_ent) {
		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
			num_ent, expected_num_ent, revision);
		return false;
	}

	return true;
}
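
/*
 * Note: CPPC v3 differs from v2 by the two optional frequency entries
 * (lowest_freq and nominal_freq), which is why the probe path below marks
 * those registers as unsupported when firmware only exposes a v2 table.
 */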

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *		{
 *		17,							// NumEntries
 *		1,							// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		..
 *		..
 *		..
 *		}
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9	// AccessSize (subspace ID)
 *	)
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->version = cpc_rev;

	if (!is_cppc_supported(cpc_rev, num_ent))
		goto out_free;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, System Memory and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported.
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 on success or a negative error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 on success or a negative error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, they should
 * be as fast as possible. We have already mapped the PCC subspace during init,
 * so we can directly write to it.
 */
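
/*
 * Note (an assumption, not stated in the original comments): the *_relaxed
 * MMIO accessors below skip per-access memory barriers; ordering against the
 * platform is presumably provided by the PCC doorbell/command-complete
 * handshake rather than by the individual register accesses.
 */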

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

/**
 * cppc_get_desired_perf - Get the value of the desired performance register.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: address of a variable to store the returned desired performance
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cpc_register_resource *desired_reg;
	struct cppc_pcc_data *pcc_ss_data = NULL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	if (CPC_IN_PCC(desired_reg)) {
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, desired_reg, desired_perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, desired_reg, desired_perf);

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value instead.
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
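
/*
 * A caller such as a cpufreq driver would typically sample these counters
 * twice and derive the average delivered performance over the interval as
 * (hedged sketch, assuming no counter wraparound between the two samples):
 *
 *	delta_ref = t1.reference - t0.reference;
 *	delta_del = t1.delivered - t0.delivered;
 *	avg_perf  = t0.reference_perf * delta_del / delta_ref;
 */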

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: Consider a group of cppc_set_perf requests that
	 * happened in a short overlapping interval. The last CPU to come out
	 * of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at-least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (the other CPU acquiring the write_lock) couldn't have
	 * happened before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing pcc CMD_READ has stolen the
	 * down_write, in which case, send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * the thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to Platform by triggering the
	 * doorbell and transferred the ownership of PCC to platform. So this
	 * avoids triggering an unnecessary doorbell and, more importantly,
	 * before triggering the doorbell it makes sure that the PCC channel
	 * ownership is still with OSPM.
	 *   pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
	 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
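
/*
 * Hedged usage sketch: a CPPC-based cpufreq driver's frequency-change path
 * typically boils down to something like:
 *
 *	struct cppc_perf_ctrls ctrls = { .desired_perf = target_perf };
 *
 *	ret = cppc_set_perf(policy->cpu, &ctrls);
 *
 * where target_perf lies between the lowest_perf and highest_perf reported
 * by cppc_get_perf_caps(). The surrounding driver code (policy, target_perf)
 * is illustrative, not part of this file.
 */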

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are definitions from the ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
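
	/*
	 * Example: with pcc_mpar = 600 commands/min, the MPAR-derived floor
	 * below works out to 60 * (10^9 / 600) ns, i.e. roughly 100 ms per
	 * request. (Illustrative value, not from a real PCCT.)
	 */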
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
1409