// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and later specifications.
 */
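
/*
 * A minimal usage sketch from a CPUfreq driver's point of view (illustrative
 * only, not a complete driver; a real client also maps _PSD domains via
 * acpi_get_psd_map() and must handle errors):
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 *
 * Here the driver asks for nominal performance; the platform is free to
 * deliver anything within the bounds it was given.
 */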

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>
#include <linux/dmi.h>
#include <linux/units.h>
#include <linux/unaligned.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct pcc_mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical use cases (currently cppc_set_perf), we
	 * need to take the read_lock and check that the channel belongs to
	 * OSPM before reading from or writing to the PCC subspace, and take
	 * the write_lock before transferring channel ownership to the
	 * platform via a doorbell. This allows us to batch a number of CPPC
	 * requests if they happen to originate at about the same time.
	 *
	 * For non-performance-critical use cases (init), take the write_lock
	 * for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
					0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
			 (cpc)->cpc_entry.reg.space_id == \
			 ACPI_ADR_SPACE_PLATFORM_COMM)

/* Check if a CPC register is in FFH */
#define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
			 (cpc)->cpc_entry.reg.space_id == \
			 ACPI_ADR_SPACE_FIXED_HARDWARE)

/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
				   (cpc)->cpc_entry.reg.space_id == \
				   ACPI_ADR_SPACE_SYSTEM_MEMORY)

/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
			       (cpc)->cpc_entry.reg.space_id == \
			       ACPI_ADR_SPACE_SYSTEM_IO)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
			  (reg)->address == 0 && \
			  (reg)->bit_width == 0 && \
			  (reg)->bit_offset == 0 && \
			  (reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
			    !!(cpc)->cpc_entry.int_value : \
			    !IS_NULL_REG(&(cpc)->cpc_entry.reg))
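
/*
 * For example, an optional field encoded as
 * Register(SystemMemory, 0, 0, 0, 0) (as in the sample _CPC package further
 * below) matches IS_NULL_REG(), so CPC_SUPPORTED() evaluates to false and
 * the field is treated as absent.
 */
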
/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define OVER_16BITS_MASK ~0xFFFFULL

#define define_one_cppc_ro(_name) \
static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name) \
	static ssize_t show_##member_name(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \
		struct struct_name st_name = {0}; \
		int ret; \
 \
		ret = access_fn(cpc_ptr->cpu_id, &st_name); \
		if (ret) \
			return ret; \
 \
		return sysfs_emit(buf, "%llu\n", \
				  (u64)st_name.member_name); \
	} \
	define_one_cppc_ro(member_name)
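
/*
 * For instance, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps,
 * highest_perf) expands to a show_highest_perf() sysfs handler that calls
 * cppc_get_perf_caps() and prints the highest_perf member, plus the matching
 * read-only highest_perf kobj_attribute.
 */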

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

/* Check for a valid access_width, otherwise fall back to using bit_width */
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
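
/*
 * E.g. access_width values 1..4 map to 8/16/32/64 bits via 8 << (aw - 1),
 * so access_width == 2 yields a 16-bit access, while access_width == 0
 * falls back to bit_width.
 */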

/* Shift and apply the mask for CPC reads/writes */
#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \
				 GENMASK(((reg)->bit_width) - 1, 0))
#define MASK_VAL_WRITE(reg, prev_val, val) \
	((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \
	((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset)))

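/*
 * Worked example: for a reg with bit_offset == 8 and bit_width == 8,
 * MASK_VAL_READ(reg, 0xABCD) == (0xABCD >> 8) & 0xFF == 0xAB, while
 * MASK_VAL_WRITE(reg, 0xABCD, 0x12) == 0x12CD, i.e. only the addressed
 * byte of the previous value is replaced.
 */
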
static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "ref:%llu del:%llu\n",
			  fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&guaranteed_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};
ATTRIBUTE_GROUPS(cppc);

static const struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = cppc_groups,
};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll the PCC status register every 3us (delay_us) for a maximum of
	 * deadline_us (timeout_us) until the PCC command complete bit is set
	 * (cond).
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock (pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = false;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec and
	 * just not send the request to the platform after hitting the MPAR limit
	 * in any 60s window.
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit\n",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

bool acpi_cpc_valid(void)
{
	struct cpc_desc *cpc_ptr;
	int cpu;

	if (acpi_disabled)
		return false;

	for_each_present_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		if (!cpc_ptr)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);

bool cppc_allow_fast_switch(void)
{
	struct cpc_register_resource *desired_reg;
	struct cpc_desc *cpc_ptr;
	int cpu;

	for_each_present_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
		    !CPC_IN_SYSTEM_IO(desired_reg))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);

/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
	struct acpi_psd_package *match_pdomain;
	struct acpi_psd_package *pdomain;
	int count_target, i;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
	if (!cpc_ptr)
		return -EFAULT;

	pdomain = &(cpc_ptr->domain_info);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	if (pdomain->num_processors <= 1)
		return 0;

	/* Validate the Domain info */
	count_target = pdomain->num_processors;
	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;

		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!match_cpc_ptr)
			goto err_fault;

		match_pdomain = &(match_cpc_ptr->domain_info);
		if (match_pdomain->domain != pdomain->domain)
			continue;

		/* Here i and cpu are in the same domain */
		if (match_pdomain->num_processors != count_target)
			goto err_fault;

		if (pdomain->coord_type != match_pdomain->coord_type)
			goto err_fault;

		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
	}

	return 0;

err_fault:
	/* Assume no coordination on any error parsing domain info */
	cpumask_clear(cpu_data->shared_cpu_map);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_ss_idx)
{
	struct pcc_mbox_chan *pcc_chan;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_chan)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * pcc_chan->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(pcc_chan->shmem_base_addr,
					pcc_chan->shmem_size);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
 *
 * Check if the architectural support for CPPC is present even
 * if the _OSC hasn't prescribed it
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_supported_by_cpu(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name (_CPC, Package() {
 *		17,							// NumEntries
 *		1,							// Revision
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *		...
 *		...
 *		...
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9	// AccessSize (subspace ID)
 *	)
 */
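
/*
 * In the sample package above, the trailing AccessSize argument of each PCC
 * Register() selects PCC subspace 2, and the RegisterAddress values
 * (0x110..0x130) are byte offsets into that subspace's communication region,
 * mapped past the 8-byte header by GET_PCC_VADDR() above.
 */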

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -ENODATA;

	if (!osc_sb_cppc2_support_acked) {
		pr_debug("CPPC v2 _OSC not acked\n");
		if (!cpc_supported_by_cpu()) {
			pr_debug("CPPC is not supported by the CPU\n");
			return -ENODEV;
		}
	}

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	if (cpc_rev < CPPC_V2_REV) {
		pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
			 pr->id);
		goto out_free;
	}

	/*
	 * Disregard _CPC if the number of entries in the return package is not
	 * as expected, but support future revisions being proper supersets of
	 * v3 and only causing more entries to be returned by _CPC.
	 */
	if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
	    (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
	    (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
		pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
			 num_ent, pr->id);
		goto out_free;
	}
	if (cpc_rev > CPPC_V3_REV) {
		num_ent = CPPC_V3_NUM_ENT;
		cpc_rev = CPPC_V3_REV;
	}

	cpc_ptr->num_entries = num_ent;
	cpc_ptr->version = cpc_rev;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
						 pr->id);
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;
					size_t access_width;

					if (!osc_cpc_flexible_adr_space_confirmed) {
						pr_debug("Flexible address space capability not supported\n");
						if (!cpc_supported_by_cpu())
							goto out_free;
					}

					access_width = GET_BIT_WIDTH(gas_t) / 8;
					addr = ioremap(gas_t->address, access_width);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
					/*
					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
					 * SystemIO doesn't implement 64-bit
					 * registers.
					 */
					pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
						 gas_t->access_width);
					goto out_free;
				}
				if (gas_t->address & OVER_16BITS_MASK) {
					/* SystemIO registers use 16-bit integer addresses */
					pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
						 gas_t->address);
					goto out_free;
				}
				if (!osc_cpc_flexible_adr_space_confirmed) {
					pr_debug("Flexible address space capability not supported\n");
					if (!cpc_supported_by_cpu())
						goto out_free;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
					pr_debug("Unsupported register type (%d) in _CPC\n",
						 gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
				 cpc_obj->type, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;
	raw_spin_lock_init(&cpc_ptr->rmw_lock);

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum: CPU number to read
 * @reg: cppc register information
 * @val: place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum: CPU number to write
 * @reg: cppc register information
 * @val: value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, they
 * should be as fast as possible. We have already mapped the PCC subspace
 * during init, so we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	void __iomem *vaddr = NULL;
	int size;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return 0;
	}

	*val = 0;
	size = GET_BIT_WIDTH(reg);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		u32 val_u32;
		acpi_status status;

		status = acpi_os_read_port((acpi_io_address)reg->address,
					   &val_u32, size);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to read SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		*val = val_u32;
		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
		/*
		 * For registers in PCC space, the register size is determined
		 * by the bit width field; the access size is used to indicate
		 * the PCC subspace id.
		 */
		size = reg->bit_width;
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, size);

	switch (size) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
				 size, reg->address);
		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
			pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
				 size, pcc_ss_id);
		}
		return -EFAULT;
	}

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		*val = MASK_VAL_READ(reg, *val);

	return 0;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	int size;
	u64 prev_val;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
	struct cpc_desc *cpc_desc;
	unsigned long flags;

	size = GET_BIT_WIDTH(reg);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		acpi_status status;

		status = acpi_os_write_port((acpi_io_address)reg->address,
					    (u32)val, size);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to write SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
		/*
		 * For registers in PCC space, the register size is determined
		 * by the bit width field; the access size is used to indicate
		 * the PCC subspace id.
		 */
		size = reg->bit_width;
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, size);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		cpc_desc = per_cpu(cpc_desc_ptr, cpu);
		if (!cpc_desc) {
			pr_debug("No CPC descriptor for CPU:%d\n", cpu);
			return -ENODEV;
		}

		raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
		switch (size) {
		case 8:
			prev_val = readb_relaxed(vaddr);
			break;
		case 16:
			prev_val = readw_relaxed(vaddr);
			break;
		case 32:
			prev_val = readl_relaxed(vaddr);
			break;
		case 64:
			prev_val = readq_relaxed(vaddr);
			break;
		default:
			raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
			return -EFAULT;
		}
		val = MASK_VAL_WRITE(reg, prev_val, val);
	}

	switch (size) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
				 size, reg->address);
		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
			pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
				 size, pcc_ss_id);
		}
		ret_val = -EFAULT;
		break;
	}

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);

	return ret_val;
}

static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *reg;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	reg = &cpc_desc->cpc_regs[reg_idx];

	if (CPC_IN_PCC(reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, reg, perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, reg, perf);

	return 0;
}

/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}

/**
 * cppc_get_highest_perf - Get the highest performance register value.
 * @cpunum: CPU from which to get highest performance.
 * @highest_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
	return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);

/**
 * cppc_get_epp_perf - Get the epp register value.
 * @cpunum: CPU from which to get epp preference value.
 * @epp_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
	return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
 *
 * CPPC has flexibility about how CPU performance counters are accessed.
 * One of the choices is PCC regions, which can have a high access latency.
 * This routine allows callers of cppc_get_perf_ctrs() to know this ahead
 * of time.
 *
 * Return: true if any of the counters are in PCC regions, false otherwise
 */
bool cppc_perf_ctrs_in_pcc(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct cpc_register_resource *ref_perf_reg;
		struct cpc_desc *cpc_desc;

		cpc_desc = per_cpu(cpc_desc_ptr, cpu);

		if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
		    CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
		    CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
			return true;

		ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];

		/*
		 * If reference perf register is not supported then we should
		 * use the nominal perf value
		 */
		if (!CPC_SUPPORTED(ref_perf_reg))
			ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

		if (CPC_IN_PCC(ref_perf_reg))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

/*
 * Set Energy Performance Preference Register value through
 * Performance Controls Interface
 */
int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *epp_set_reg;
	struct cpc_register_resource *auto_sel_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
	epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];

	if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
			return -ENODEV;
		}

		if (CPC_SUPPORTED(auto_sel_reg)) {
			ret = cpc_write(cpu, auto_sel_reg, enable);
			if (ret)
				return ret;
		}

		if (CPC_SUPPORTED(epp_set_reg)) {
			ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
			if (ret)
				return ret;
		}

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
	} else if (osc_cpc_flexible_adr_space_confirmed &&
		   CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
		ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
	} else {
		ret = -ENOTSUPP;
		pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);

/**
 * cppc_get_auto_sel_caps - Read autonomous selection register.
 * @cpunum : CPU from which to read register.
 * @perf_caps : struct where autonomous selection register value is updated.
 */
int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *auto_sel_reg;
	u64 auto_sel;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];

	if (!CPC_SUPPORTED(auto_sel_reg))
		pr_warn_once("Autonomous mode is not supported!\n");

	if (CPC_IN_PCC(auto_sel_reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -ENODEV;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
			cpc_read(cpunum, auto_sel_reg, &auto_sel);
			perf_caps->auto_sel = (bool)auto_sel;
		} else {
			ret = -EIO;
		}

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);

/**
 * cppc_set_auto_sel - Write autonomous selection register.
 * @cpu    : CPU to which to write register.
 * @enable : the desired value of the autonomous selection register.
 */
int cppc_set_auto_sel(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *auto_sel_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];

	if (CPC_IN_PCC(auto_sel_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}

		if (CPC_SUPPORTED(auto_sel_reg)) {
			ret = cpc_write(cpu, auto_sel_reg, enable);
			if (ret)
				return ret;
		}

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
	} else {
		ret = -ENOTSUPP;
		pr_debug("_CPC in PCC is not supported\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);

/**
 * cppc_set_enable - Set to enable CPPC on the processor by writing the
 * Continuous Performance Control package EnableRegister field.
 * @cpu: CPU for which to enable CPPC register.
 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
 *
 * Return: 0 for success, -ERRNO or -EIO otherwise.
 */
int cppc_set_enable(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *enable_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -EINVAL;
	}

	enable_reg = &cpc_desc->cpc_regs[ENABLE];

	if (CPC_IN_PCC(enable_reg)) {
		if (pcc_ss_id < 0)
			return -EIO;

		ret = cpc_write(cpu, enable_reg, enable);
		if (ret)
			return ret;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
		return ret;
	}

	return cpc_write(cpu, enable_reg, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
1675
1676 /**
1677 * cppc_set_perf - Set a CPU's performance controls.
1678 * @cpu: CPU for which to set performance controls.
1679 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1680 *
1681 * Return: 0 for success, -ERRNO otherwise.
1682 */
cppc_set_perf(int cpu,struct cppc_perf_ctrls * perf_ctrls)1683 int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1684 {
1685 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1686 struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
1687 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1688 struct cppc_pcc_data *pcc_ss_data = NULL;
1689 int ret = 0;
1690
1691 if (!cpc_desc) {
1692 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1693 return -ENODEV;
1694 }
1695
1696 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1697 min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
1698 max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];
1699
1700 /*
1701 * This is Phase-I where we want to write to CPC registers
1702 * -> We want all CPUs to be able to execute this phase in parallel
1703 *
1704 * Since read_lock can be acquired by multiple CPUs simultaneously we
1705 * achieve that goal here
1706 */
1707 if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
1708 if (pcc_ss_id < 0) {
1709 pr_debug("Invalid pcc_ss_id\n");
1710 return -ENODEV;
1711 }
1712 pcc_ss_data = pcc_data[pcc_ss_id];
1713 down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1714 if (pcc_ss_data->platform_owns_pcc) {
1715 ret = check_pcc_chan(pcc_ss_id, false);
1716 if (ret) {
1717 up_read(&pcc_ss_data->pcc_lock);
1718 return ret;
1719 }
1720 }
1721 /*
1722 * Update the pending_write to make sure a PCC CMD_READ will not
1723 * arrive and steal the channel during the switch to write lock
1724 */
1725 pcc_ss_data->pending_pcc_write_cmd = true;
1726 cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1727 cpc_desc->write_cmd_status = 0;
1728 }
1729
1730 cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1731
1732 /*
1733 * Only write if min_perf and max_perf not zero. Some drivers pass zero
1734 * value to min and max perf, but they don't mean to set the zero value,
1735 * they just don't want to write to those registers.
1736 */
1737 if (perf_ctrls->min_perf)
1738 cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
1739 if (perf_ctrls->max_perf)
1740 cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);
1741
1742 if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
1743 up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */
	/*
	 * This is Phase-II, where we transfer the ownership of PCC to the
	 * platform.
	 *
	 * Short summary: consider a group of cppc_set_perf requests that
	 * arrive in a short, overlapping interval. The last CPU to come out
	 * of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I.
	 *     2. Once we start Phase-II we want to prevent all other CPUs
	 * from entering Phase-I.
	 *     3. We want only one CPU among all those who went through
	 * Phase-I to run Phase-II.
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE:
	 *     1. There is at least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute
	 * write_trylock and has already acquired the write_lock. We know for
	 * a fact that the other CPU cannot have acquired the write_lock
	 * before this CPU's Phase-I, as we held the read_lock.
	 *     3. Some other CPU executing a PCC CMD_READ has stolen the
	 * down_write, in which case send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking pending_pcc_write_cmd.
	 * So in all cases this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check
	 * pending_pcc_write_cmd to take care of the following scenario: the
	 * thread running this code could be scheduled out between Phase-I and
	 * Phase-II. Before it is scheduled back in, another CPU could have
	 * delivered the request to the platform by triggering the doorbell
	 * and transferred the ownership of PCC to the platform. The check
	 * thus avoids triggering an unnecessary doorbell and, more
	 * importantly, makes sure that the PCC channel ownership is still
	 * with OSPM before the doorbell is rung.
	 * pending_pcc_write_cmd can also be cleared by a different CPU if
	 * there was a PCC CMD_READ waiting on down_write and it stole the
	 * lock before the PCC CMD_WRITE completed. send_pcc_cmd checks for
	 * this case during a CMD_READ and, if there are pending writes,
	 * delivers the write command before servicing the read command.
	 */
	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
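/*
 * Illustrative sketch, not part of the driver: a minimal caller that requests
 * a new desired performance level while leaving min_perf and max_perf
 * untouched (zero means "do not write", per the comment in cppc_set_perf()
 * above). The function name is an assumption.
 */
static int __maybe_unused example_request_perf(int cpu, u32 desired)
{
	struct cppc_perf_ctrls ctrls = {
		.desired_perf = desired,
		/* .min_perf and .max_perf stay 0, so they are not written. */
	};

	return cppc_set_perf(cpu, &ctrls);
}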

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for per_cpu().
 *
 * ACPI CPPC does not explicitly specify how a platform can communicate the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables, which provides info on
 * the number and frequency of PCC commands the platform can handle.
 *
 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
 * then assume there is no latency.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal - Expected latency to process a command, in microseconds.
	 * pcc_mpar    - The maximum number of periodic requests that the
	 *               subspace channel can support, reported in commands
	 *               per minute. 0 indicates no limitation.
	 * pcc_mrtt    - The minimum amount of time that OSPM must wait after
	 *               the completion of a command before issuing the next
	 *               command, in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
		return 0;
	else if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
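/*
 * Worked example (the numbers are assumptions for illustration): with
 * pcc_mpar = 600 commands/minute, the MPAR term is 60 * (10^9 / 600), which
 * is roughly 100,000,000 ns. With pcc_nominal = 1000 us the nominal term is
 * 1,000,000 ns, and with pcc_mrtt = 500 us the MRTT term is 500,000 ns, so
 * the reported transition latency is the maximum of the three: ~100 ms.
 */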

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = umax(val, *mhz);
	}
}
/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Fall back to 1 MHz if DMI did not provide a value, so that callers
	 * never end up with a zero divisor.
	 */
	mhz = mhz ? mhz : 1;

	return KHZ_PER_MHZ * mhz;
}
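/*
 * For example (an assumed DMI value): if the DMI processor record reports a
 * max speed of 3500 MHz, this returns 3,500,000 kHz; if the record is absent
 * or zero, the 1 MHz fallback yields 1000 kHz.
 */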

/*
 * If the CPPC lowest_freq and nominal_freq registers are exposed, then we
 * can use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing through the two points:
 * - (lowest perf, lowest freq)
 * - (nominal perf, nominal freq)
 */
unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
{
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		/* Avoid special case when nominal_freq is equal to lowest_freq */
		if (caps->lowest_freq == caps->nominal_freq) {
			mul = caps->nominal_freq;
			div = caps->nominal_perf;
		} else {
			mul = caps->nominal_freq - caps->lowest_freq;
			div = caps->nominal_perf - caps->lowest_perf;
		}
		mul *= KHZ_PER_MHZ;
		offset = caps->nominal_freq * KHZ_PER_MHZ -
			 div64_u64(caps->nominal_perf * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}

	retval = offset + div64_u64(perf * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
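/*
 * Worked example (capability values are assumptions): with lowest_perf = 10
 * at lowest_freq = 1000 MHz and nominal_perf = 30 at nominal_freq = 3000 MHz,
 * mul = (3000 - 1000) * 1000 = 2,000,000, div = 30 - 10 = 20 and
 * offset = 3,000,000 - 30 * 2,000,000 / 20 = 0, so perf = 20 maps to
 * 20 * 2,000,000 / 20 = 2,000,000 kHz, i.e. 2 GHz.
 */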

unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
{
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		/* Avoid special case when nominal_freq is equal to lowest_freq */
		if (caps->lowest_freq == caps->nominal_freq) {
			mul = caps->nominal_perf;
			div = caps->nominal_freq;
		} else {
			mul = caps->nominal_perf - caps->lowest_perf;
			div = caps->nominal_freq - caps->lowest_freq;
		}
		/*
		 * We don't need to convert to kHz for computing offset and can
		 * directly use nominal_freq and lowest_freq as the div64_u64
		 * will remove the frequency unit.
		 */
		offset = caps->nominal_perf -
			 div64_u64(caps->nominal_freq * mul, div);
		/* But we need it for computing the perf level. */
		div *= KHZ_PER_MHZ;
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	retval = offset + div64_u64(freq * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
EXPORT_SYMBOL_GPL(cppc_khz_to_perf);
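/*
 * Illustrative sketch, not part of the driver: a debug helper showing that
 * the two conversions above are inverses of each other when the affine data
 * is exact. The function name is an assumption.
 */
static void __maybe_unused example_check_roundtrip(struct cppc_perf_caps *caps)
{
	unsigned int khz = cppc_perf_to_khz(caps, caps->nominal_perf);
	unsigned int perf = cppc_khz_to_perf(caps, khz);

	/* With exact affine data, perf comes back equal to nominal_perf. */
	pr_debug("nominal perf %u -> %u kHz -> perf %u\n",
		 caps->nominal_perf, khz, perf);
}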