1 /*
2 * cpuidle-powernv - idle state cpuidle driver.
3 * Adapted from drivers/cpuidle/cpuidle-pseries
4 *
5 */
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/init.h>
10 #include <linux/moduleparam.h>
11 #include <linux/cpuidle.h>
12 #include <linux/cpu.h>
13 #include <linux/notifier.h>
14 #include <linux/clockchips.h>
15 #include <linux/of.h>
16 #include <linux/slab.h>
17
18 #include <asm/machdep.h>
19 #include <asm/firmware.h>
20 #include <asm/opal.h>
21 #include <asm/runlatch.h>
22
23 #define MAX_POWERNV_IDLE_STATES 8
24
/*
 * Driver instance registered with the cpuidle core; its state table is
 * filled in by powernv_cpuidle_driver_init() before registration.
 */
struct cpuidle_driver powernv_idle_driver = {
	.name             = "powernv_idle",
	.owner            = THIS_MODULE,
};
29
/* Number of entries in cpuidle_state_table (set from the device tree). */
static int max_idle_state;
/* Platform idle-state table selected at probe time (points at powernv_states). */
static struct cpuidle_state *cpuidle_state_table;
/* Fallback snooze polling budget, in timebase ticks (one tick period's worth). */
static u64 default_snooze_timeout;
/* True when deeper states exist, i.e. snooze should eventually give up polling. */
static bool snooze_timeout_en;
34
get_snooze_timeout(struct cpuidle_device * dev,struct cpuidle_driver * drv,int index)35 static u64 get_snooze_timeout(struct cpuidle_device *dev,
36 struct cpuidle_driver *drv,
37 int index)
38 {
39 int i;
40
41 if (unlikely(!snooze_timeout_en))
42 return default_snooze_timeout;
43
44 for (i = index + 1; i < drv->state_count; i++) {
45 struct cpuidle_state *s = &drv->states[i];
46 struct cpuidle_state_usage *su = &dev->states_usage[i];
47
48 if (s->disabled || su->disable)
49 continue;
50
51 return s->target_residency * tb_ticks_per_usec;
52 }
53
54 return default_snooze_timeout;
55 }
56
/*
 * snooze_loop - shallowest idle state: busy-poll until work arrives or the
 * snooze budget expires, then return so the governor can pick a deeper state.
 * Returns @index, as required by the cpuidle enter callback contract.
 */
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	u64 snooze_exit_time;

	local_irq_enable();
	/* We poll need_resched(); the scheduler may skip the wakeup IPI. */
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* Deadline (in timebase ticks) after which polling gives up. */
	snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
	ppc64_runlatch_off();
	while (!need_resched()) {
		/* Lower SMT priority so sibling threads get the core. */
		HMT_low();
		HMT_very_low();
		if (snooze_timeout_en && get_tb() > snooze_exit_time)
			break;
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	/* Make the cleared polling flag visible before returning to the core. */
	smp_mb();
	return index;
}
81
/*
 * nap_loop - enter the hardware Nap power-saving state via power7_idle().
 * The runlatch is dropped around the nap to signal the core is idle.
 * Returns @index per the cpuidle enter callback contract.
 */
static int nap_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	ppc64_runlatch_off();
	power7_idle();
	ppc64_runlatch_on();
	return index;
}
91
/* Register for fastsleep only in oneshot mode of broadcast */
#ifdef CONFIG_TICK_ONESHOT
/*
 * fastsleep_loop - enter the deep FastSleep state.
 *
 * The decrementer wakeup (LPCR_PECE1) is masked for the duration of the
 * sleep because the local timer has been offloaded to the tick broadcast
 * framework; LPCR is restored to its original value on exit.
 */
static int fastsleep_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long saved_lpcr = mfspr(SPRN_LPCR);

	/* FastSleep is not safe until the system is fully up. */
	if (unlikely(system_state < SYSTEM_RUNNING))
		return index;

	/*
	 * Do not exit powersave upon decrementer as we've setup the timer
	 * offload.
	 */
	mtspr(SPRN_LPCR, saved_lpcr & ~LPCR_PECE1);
	power7_sleep();
	mtspr(SPRN_LPCR, saved_lpcr);

	return index;
}
#endif
/*
 * States for dedicated partition case.
 */
static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
	{ /* Snooze: index 0 is always present; deeper states are appended
	   * from the device tree by powernv_add_idle_states(). */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
};
129
powernv_cpuidle_add_cpu_notifier(struct notifier_block * n,unsigned long action,void * hcpu)130 static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
131 unsigned long action, void *hcpu)
132 {
133 int hotcpu = (unsigned long)hcpu;
134 struct cpuidle_device *dev =
135 per_cpu(cpuidle_devices, hotcpu);
136
137 if (dev && cpuidle_get_driver()) {
138 switch (action) {
139 case CPU_ONLINE:
140 case CPU_ONLINE_FROZEN:
141 cpuidle_pause_and_lock();
142 cpuidle_enable_device(dev);
143 cpuidle_resume_and_unlock();
144 break;
145
146 case CPU_DEAD:
147 case CPU_DEAD_FROZEN:
148 cpuidle_pause_and_lock();
149 cpuidle_disable_device(dev);
150 cpuidle_resume_and_unlock();
151 break;
152
153 default:
154 return NOTIFY_DONE;
155 }
156 }
157 return NOTIFY_OK;
158 }
159
/* Hotplug notifier, registered in powernv_processor_idle_init(). */
static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = powernv_cpuidle_add_cpu_notifier,
};
163
164 /*
165 * powernv_cpuidle_driver_init()
166 */
powernv_cpuidle_driver_init(void)167 static int powernv_cpuidle_driver_init(void)
168 {
169 int idle_state;
170 struct cpuidle_driver *drv = &powernv_idle_driver;
171
172 drv->state_count = 0;
173
174 for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
175 /* Is the state not enabled? */
176 if (cpuidle_state_table[idle_state].enter == NULL)
177 continue;
178
179 drv->states[drv->state_count] = /* structure copy */
180 cpuidle_state_table[idle_state];
181
182 drv->state_count += 1;
183 }
184
185 /*
186 * On the PowerNV platform cpu_present may be less than cpu_possible in
187 * cases when firmware detects the CPU, but it is not available to the
188 * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotplugable at
189 * run time and hence cpu_devices are not created for those CPUs by the
190 * generic topology_init().
191 *
192 * drv->cpumask defaults to cpu_possible_mask in
193 * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where
194 * cpu_devices are not created for CPUs in cpu_possible_mask that
195 * cannot be hot-added later at run time.
196 *
197 * Trying cpuidle_register_device() on a CPU without a cpu_device is
198 * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
199 */
200
201 drv->cpumask = (struct cpumask *)cpu_present_mask;
202
203 return 0;
204 }
205
powernv_add_idle_states(void)206 static int powernv_add_idle_states(void)
207 {
208 struct device_node *power_mgt;
209 int nr_idle_states = 1; /* Snooze */
210 int dt_idle_states;
211 u32 *latency_ns, *residency_ns, *flags;
212 int i, rc;
213
214 /* Currently we have snooze statically defined */
215
216 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
217 if (!power_mgt) {
218 pr_warn("opal: PowerMgmt Node not found\n");
219 goto out;
220 }
221
222 /* Read values of any property to determine the num of idle states */
223 dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
224 if (dt_idle_states < 0) {
225 pr_warn("cpuidle-powernv: no idle states found in the DT\n");
226 goto out;
227 }
228
229 flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
230 if (of_property_read_u32_array(power_mgt,
231 "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
232 pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
233 goto out_free_flags;
234 }
235
236 latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
237 rc = of_property_read_u32_array(power_mgt,
238 "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
239 if (rc) {
240 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
241 goto out_free_latency;
242 }
243
244 residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
245 rc = of_property_read_u32_array(power_mgt,
246 "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
247
248 for (i = 0; i < dt_idle_states; i++) {
249
250 /*
251 * Cpuidle accepts exit_latency and target_residency in us.
252 * Use default target_residency values if f/w does not expose it.
253 */
254 if (flags[i] & OPAL_PM_NAP_ENABLED) {
255 /* Add NAP state */
256 strcpy(powernv_states[nr_idle_states].name, "Nap");
257 strcpy(powernv_states[nr_idle_states].desc, "Nap");
258 powernv_states[nr_idle_states].flags = 0;
259 powernv_states[nr_idle_states].target_residency = 100;
260 powernv_states[nr_idle_states].enter = &nap_loop;
261 }
262
263 /*
264 * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
265 * within this config dependency check.
266 */
267 #ifdef CONFIG_TICK_ONESHOT
268 if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
269 flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
270 /* Add FASTSLEEP state */
271 strcpy(powernv_states[nr_idle_states].name, "FastSleep");
272 strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
273 powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
274 powernv_states[nr_idle_states].target_residency = 300000;
275 powernv_states[nr_idle_states].enter = &fastsleep_loop;
276 }
277 #endif
278 powernv_states[nr_idle_states].exit_latency =
279 ((unsigned int)latency_ns[i]) / 1000;
280
281 if (!rc) {
282 powernv_states[nr_idle_states].target_residency =
283 ((unsigned int)residency_ns[i]) / 1000;
284 }
285
286 nr_idle_states++;
287 }
288
289 kfree(residency_ns);
290 out_free_latency:
291 kfree(latency_ns);
292 out_free_flags:
293 kfree(flags);
294 out:
295 return nr_idle_states;
296 }
297
298 /*
299 * powernv_idle_probe()
300 * Choose state table for shared versus dedicated partition
301 */
powernv_idle_probe(void)302 static int powernv_idle_probe(void)
303 {
304 if (cpuidle_disable != IDLE_NO_OVERRIDE)
305 return -ENODEV;
306
307 if (firmware_has_feature(FW_FEATURE_OPAL)) {
308 cpuidle_state_table = powernv_states;
309 /* Device tree can indicate more idle states */
310 max_idle_state = powernv_add_idle_states();
311 default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
312 if (max_idle_state > 1)
313 snooze_timeout_en = true;
314 } else
315 return -ENODEV;
316
317 return 0;
318 }
319
powernv_processor_idle_init(void)320 static int __init powernv_processor_idle_init(void)
321 {
322 int retval;
323
324 retval = powernv_idle_probe();
325 if (retval)
326 return retval;
327
328 powernv_cpuidle_driver_init();
329 retval = cpuidle_register(&powernv_idle_driver, NULL);
330 if (retval) {
331 printk(KERN_DEBUG "Registration of powernv driver failed.\n");
332 return retval;
333 }
334
335 register_cpu_notifier(&setup_hotplug_notifier);
336 printk(KERN_DEBUG "powernv_idle_driver registered\n");
337 return 0;
338 }
339
340 device_initcall(powernv_processor_idle_init);
341