// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Partition Mobility/Migration
 *
 * Copyright (C) 2010 Nathan Fontenot
 * Copyright (C) 2010 IBM Corporation
 */


#define pr_fmt(fmt) "mobility: " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/stop_machine.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/stringify.h>

#include <asm/machdep.h>
#include <asm/rtas.h>
#include "pseries.h"
#include "../../kernel/cacheinfo.h"

static struct kobject *mobility_kobj;

struct update_props_workarea {
	__be32 phandle;
	__be32 state;
	__be64 reserved;
	__be32 nprops;
} __packed;

#define NODE_ACTION_MASK	0xff000000
#define NODE_COUNT_MASK		0x00ffffff

#define DELETE_DT_NODE	0x01000000
#define UPDATE_DT_NODE	0x02000000
#define ADD_DT_NODE	0x03000000

#define MIGRATION_SCOPE	(1)
#define PRRN_SCOPE	-2

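/*
 * Copy the caller's work area into the globally shared rtas_data_buf,
 * make the RTAS call with the given scope, and copy the result back.
 * rtas_data_buf_lock serializes use of the shared buffer.
 */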
static int mobility_rtas_call(int token, char *buf, s32 scope)
{
	int rc;

	spin_lock(&rtas_data_buf_lock);

	memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
	memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

	spin_unlock(&rtas_data_buf_lock);
	return rc;
}

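/*
 * Remove a device tree node that firmware has reported as deleted,
 * except for nodes under the ibm,platform-facilities hierarchy (see
 * the comment in the function body).
 */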
static int delete_dt_node(struct device_node *dn)
{
	struct device_node *pdn;
	bool is_platfac;

	pdn = of_get_parent(dn);
	is_platfac = of_node_is_type(dn, "ibm,platform-facilities") ||
		     of_node_is_type(pdn, "ibm,platform-facilities");
	of_node_put(pdn);

	/*
	 * The drivers that bind to nodes in the platform-facilities
	 * hierarchy don't support node removal, and the removal directive
	 * from firmware is always followed by an add of an equivalent
	 * node. The capability (e.g. RNG, encryption, compression)
	 * represented by the node is never interrupted by the migration.
	 * So ignore changes to this part of the tree.
	 */
	if (is_platfac) {
		pr_notice("ignoring remove operation for %pOFfp\n", dn);
		return 0;
	}

	pr_debug("removing node %pOFfp\n", dn);
	dlpar_detach_node(dn);
	return 0;
}

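/*
 * Apply one property value descriptor returned by ibm,update-properties.
 * A value may arrive in several chunks; *prop accumulates the partial
 * value across calls and is committed with of_update_property() once
 * the final chunk has been appended.
 */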
static int update_dt_property(struct device_node *dn, struct property **prop,
			      const char *name, u32 vd, char *value)
{
	struct property *new_prop = *prop;
	int more = 0;

	/* A negative 'vd' value indicates that only part of the new property
	 * value is contained in the buffer and we need to call
	 * ibm,update-properties again to get the rest of the value.
	 *
	 * A negative value is also the two's complement of the actual value.
	 */
	if (vd & 0x80000000) {
		vd = ~vd + 1;
		more = 1;
	}

	if (new_prop) {
		/* partial property fixup */
		char *new_data = kzalloc(new_prop->length + vd, GFP_KERNEL);
		if (!new_data)
			return -ENOMEM;

		memcpy(new_data, new_prop->value, new_prop->length);
		memcpy(new_data + new_prop->length, value, vd);

		kfree(new_prop->value);
		new_prop->value = new_data;
		new_prop->length += vd;
	} else {
		new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
		if (!new_prop)
			return -ENOMEM;

		new_prop->name = kstrdup(name, GFP_KERNEL);
		if (!new_prop->name) {
			kfree(new_prop);
			return -ENOMEM;
		}

		new_prop->length = vd;
		new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
		if (!new_prop->value) {
			kfree(new_prop->name);
			kfree(new_prop);
			return -ENOMEM;
		}

		memcpy(new_prop->value, value, vd);
		*prop = new_prop;
	}

	if (!more) {
		pr_debug("updating node %pOF property %s\n", dn, name);
		of_update_property(dn, new_prop);
		*prop = NULL;
	}

	return 0;
}

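/*
 * Update the properties of a single device tree node by calling
 * ibm,update-properties repeatedly until firmware indicates there are
 * no more descriptors to process for this node.
 */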
static int update_dt_node(struct device_node *dn, s32 scope)
{
	struct update_props_workarea *upwa;
	struct property *prop = NULL;
	int i, rc, rtas_rc;
	char *prop_data;
	char *rtas_buf;
	int update_properties_token;
	u32 nprops;
	u32 vd;

	update_properties_token = rtas_token("ibm,update-properties");
	if (update_properties_token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	upwa = (struct update_props_workarea *)&rtas_buf[0];
	upwa->phandle = cpu_to_be32(dn->phandle);

	do {
		rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
					     scope);
		if (rtas_rc < 0)
			break;

		prop_data = rtas_buf + sizeof(*upwa);
		nprops = be32_to_cpu(upwa->nprops);

		/* On the first call to ibm,update-properties for a node the
		 * first property value descriptor contains an empty
		 * property name, the property value length encoded as u32,
		 * and the property value is the node path being updated.
		 */
		if (*prop_data == 0) {
			prop_data++;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += vd + sizeof(vd);
			nprops--;
		}

		for (i = 0; i < nprops; i++) {
			char *prop_name;

			prop_name = prop_data;
			prop_data += strlen(prop_name) + 1;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += sizeof(vd);

			switch (vd) {
			case 0x00000000:
				/* name only property, nothing to do */
				break;

			case 0x80000000:
				of_remove_property(dn, of_find_property(dn,
							prop_name, NULL));
				prop = NULL;
				break;

			default:
				rc = update_dt_property(dn, &prop, prop_name,
							vd, prop_data);
				if (rc) {
					pr_err("updating %s property failed: %d\n",
					       prop_name, rc);
				}

				prop_data += vd;
				break;
			}

			cond_resched();
		}

		cond_resched();
	} while (rtas_rc == 1);

	kfree(rtas_buf);
	return 0;
}

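/*
 * Configure and attach a new device tree node reported by firmware,
 * identified by its DRC index, under the given parent node.
 */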
static int add_dt_node(struct device_node *parent_dn, __be32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = dlpar_configure_connector(drc_index, parent_dn);
	if (!dn)
		return -ENOENT;

	/*
	 * Since delete_dt_node() ignores this node type, this is the
	 * necessary counterpart. We also know that a platform-facilities
	 * node returned from dlpar_configure_connector() has children
	 * attached, and dlpar_attach_node() only adds the parent, leaking
	 * the children. So ignore these on the add side for now.
	 */
	if (of_node_is_type(dn, "ibm,platform-facilities")) {
		pr_notice("ignoring add operation for %pOF\n", dn);
		dlpar_free_cc_nodes(dn);
		return 0;
	}

	rc = dlpar_attach_node(dn, parent_dn);
	if (rc)
		dlpar_free_cc_nodes(dn);

	pr_debug("added node %pOFfp\n", dn);

	return rc;
}

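/**
 * pseries_devicetree_update() - Apply device tree changes reported by firmware.
 * @scope: RTAS scope argument, e.g. MIGRATION_SCOPE after a migration.
 *
 * Repeatedly calls ibm,update-nodes and dispatches each reported node
 * to the delete/update/add handlers above.
 *
 * Return: 0 on success, otherwise an error code from the update sequence.
 */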
int pseries_devicetree_update(s32 scope)
{
	char *rtas_buf;
	__be32 *data;
	int update_nodes_token;
	int rc;

	update_nodes_token = rtas_token("ibm,update-nodes");
	if (update_nodes_token == RTAS_UNKNOWN_SERVICE)
		return 0;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	do {
		rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
		if (rc && rc != 1)
			break;

		data = (__be32 *)rtas_buf + 4;
		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
			int i;
			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;

			data++;

			for (i = 0; i < node_count; i++) {
				struct device_node *np;
				__be32 phandle = *data++;
				__be32 drc_index;

				np = of_find_node_by_phandle(be32_to_cpu(phandle));
				if (!np) {
					pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n",
						be32_to_cpu(phandle), action);
					continue;
				}

				switch (action) {
				case DELETE_DT_NODE:
					delete_dt_node(np);
					break;
				case UPDATE_DT_NODE:
					update_dt_node(np, scope);
					break;
				case ADD_DT_NODE:
					drc_index = *data++;
					add_dt_node(np, drc_index);
					break;
				}

				of_node_put(np);
				cond_resched();
			}
		}

		cond_resched();
	} while (rc == 1);

	kfree(rtas_buf);
	return rc;
}

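/*
 * Bring the running kernel back in sync with the destination system
 * after a successful migration: activate any new firmware level,
 * refresh the device tree and cacheinfo, and re-evaluate security
 * mitigations and hv-24x7 system information.
 */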
void post_mobility_fixup(void)
{
	int rc;

	rtas_activate_firmware();

	/*
	 * We don't want CPUs to go online/offline while the device
	 * tree is being updated.
	 */
	cpus_read_lock();

	/*
	 * It's common for the destination firmware to replace cache
	 * nodes. Release all of the cacheinfo hierarchy's references
	 * before updating the device tree.
	 */
	cacheinfo_teardown();

	rc = pseries_devicetree_update(MIGRATION_SCOPE);
	if (rc)
		pr_err("device tree update failed: %d\n", rc);

	cacheinfo_rebuild();

	cpus_read_unlock();

	/* Possibly switch to a new L1 flush type */
	pseries_setup_security_mitigations();

	/* Reinitialise system information for hv-24x7 */
	read_24x7_sys_info();
}

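/*
 * Query the state of the VASI stream identified by @handle and
 * translate the hypervisor return code into an errno value; on
 * H_SUCCESS the state is stored in *res.
 */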
static int poll_vasi_state(u64 handle, unsigned long *res)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long hvrc;
	int ret;

	hvrc = plpar_hcall(H_VASI_STATE, retbuf, handle);
	switch (hvrc) {
	case H_SUCCESS:
		ret = 0;
		*res = retbuf[0];
		break;
	case H_PARAMETER:
		ret = -EINVAL;
		break;
	case H_FUNCTION:
		ret = -EOPNOTSUPP;
		break;
	case H_HARDWARE:
	default:
		pr_err("unexpected H_VASI_STATE result %ld\n", hvrc);
		ret = -EIO;
		break;
	}
	return ret;
}

static int wait_for_vasi_session_suspending(u64 handle)
{
	unsigned long state;
	int ret;

	/*
	 * Wait for transition from H_VASI_ENABLED to
	 * H_VASI_SUSPENDING. Treat anything else as an error.
	 */
	while (true) {
		ret = poll_vasi_state(handle, &state);

		if (ret != 0 || state == H_VASI_SUSPENDING) {
			break;
		} else if (state == H_VASI_ENABLED) {
			ssleep(1);
		} else {
			pr_err("unexpected H_VASI_STATE result %lu\n", state);
			ret = -EIO;
			break;
		}
	}

	/*
	 * Proceed even if H_VASI_STATE is unavailable. If H_JOIN or
	 * ibm,suspend-me are also unimplemented, we'll recover then.
	 */
	if (ret == -EOPNOTSUPP)
		ret = 0;

	return ret;
}

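/*
 * Wake a single CPU that is sleeping in H_JOIN by sending it H_PROD.
 */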
static void prod_single(unsigned int target_cpu)
{
	long hvrc;
	int hwid;

	hwid = get_hard_smp_processor_id(target_cpu);
	hvrc = plpar_hcall_norets(H_PROD, hwid);
	if (hvrc == H_SUCCESS)
		return;
	pr_err_ratelimited("H_PROD of CPU %u (hwid %d) error: %ld\n",
			   target_cpu, hwid, hvrc);
}

static void prod_others(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id())
			prod_single(cpu);
	}
}

static u16 clamp_slb_size(void)
{
	u16 prev = mmu_slb_size;

	slb_set_size(SLB_MIN_SIZE);

	return prev;
}

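/*
 * Called on the CPU that received H_CONTINUE from H_JOIN; performs the
 * actual ibm,suspend-me sequence after clamping the SLB size.
 */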
static int do_suspend(void)
{
	u16 saved_slb_size;
	int status;
	int ret;

	pr_info("calling ibm,suspend-me on CPU %i\n", smp_processor_id());

	/*
	 * The destination processor model may have fewer SLB entries
	 * than the source. We reduce mmu_slb_size to a safe minimum
	 * before suspending in order to minimize the possibility of
	 * programming non-existent entries on the destination. If
	 * suspend fails, we restore it before returning. On success
	 * the OF reconfig path will update it from the new device
	 * tree after resuming on the destination.
	 */
	saved_slb_size = clamp_slb_size();

	ret = rtas_ibm_suspend_me(&status);
	if (ret != 0) {
		pr_err("ibm,suspend-me error: %d\n", status);
		slb_set_size(saved_slb_size);
	}

	return ret;
}

/**
 * struct pseries_suspend_info - State shared between CPUs for join/suspend.
 * @counter: Threads are to increment this upon resuming from suspend
 *           or if an error is received from H_JOIN. The thread which performs
 *           the first increment (i.e. sets it to 1) is responsible for
 *           waking the other threads.
 * @done: False if join/suspend is in progress. True if the operation is
 *        complete (successful or not).
 */
struct pseries_suspend_info {
	atomic_t counter;
	bool done;
};

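/*
 * Executed on every online CPU via stop_machine(). All CPUs call
 * H_JOIN; the one that gets H_CONTINUE back performs the suspend,
 * while the others sleep in the hypervisor until they are prodded
 * after the operation completes.
 */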
static int do_join(void *arg)
{
	struct pseries_suspend_info *info = arg;
	atomic_t *counter = &info->counter;
	long hvrc;
	int ret;

retry:
	/* Must ensure MSR.EE off for H_JOIN. */
	hard_irq_disable();
	hvrc = plpar_hcall_norets(H_JOIN);

	switch (hvrc) {
	case H_CONTINUE:
		/*
		 * All other CPUs are offline or in H_JOIN. This CPU
		 * attempts the suspend.
		 */
		ret = do_suspend();
		break;
	case H_SUCCESS:
		/*
		 * The suspend is complete and this cpu has received a
		 * prod, or we've received a stray prod from unrelated
		 * code (e.g. paravirt spinlocks) and we need to join
		 * again.
		 *
		 * This barrier orders the return from H_JOIN above vs
		 * the load of info->done. It pairs with the barrier
		 * in the wakeup/prod path below.
		 */
		smp_mb();
		if (READ_ONCE(info->done) == false) {
			pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
					    smp_processor_id());
			goto retry;
		}
		ret = 0;
		break;
	case H_BAD_MODE:
	case H_HARDWARE:
	default:
		ret = -EIO;
		pr_err_ratelimited("H_JOIN error %ld on CPU %i\n",
				   hvrc, smp_processor_id());
		break;
	}

	if (atomic_inc_return(counter) == 1) {
		pr_info("CPU %u waking all threads\n", smp_processor_id());
		WRITE_ONCE(info->done, true);
		/*
		 * This barrier orders the store to info->done vs subsequent
		 * H_PRODs to wake the other CPUs. It pairs with the barrier
		 * in the H_SUCCESS case above.
		 */
		smp_mb();
		prod_others();
	}
	/*
	 * Execution may have been suspended for several seconds, so
	 * reset the watchdog.
	 */
	touch_nmi_watchdog();
	return ret;
}

/*
 * Abort reason code byte 0. We use only the 'Migrating partition' value.
 */
enum vasi_aborting_entity {
	ORCHESTRATOR = 1,
	VSP_SOURCE = 2,
	PARTITION_FIRMWARE = 3,
	PLATFORM_FIRMWARE = 4,
	VSP_TARGET = 5,
	MIGRATING_PARTITION = 6,
};

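/*
 * Tell the platform to cancel the migration, encoding the aborting
 * entity (this partition) and the error that caused the cancellation
 * in the H_VASI_SIGNAL reason code.
 */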
static void pseries_cancel_migration(u64 handle, int err)
{
	u32 reason_code;
	u32 detail;
	u8 entity;
	long hvrc;

	entity = MIGRATING_PARTITION;
	detail = abs(err) & 0xffffff;
	reason_code = (entity << 24) | detail;

	hvrc = plpar_hcall_norets(H_VASI_SIGNAL, handle,
				  H_VASI_SIGNAL_CANCEL, reason_code);
	if (hvrc)
		pr_err("H_VASI_SIGNAL error: %ld\n", hvrc);
}

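/*
 * Attempt the join/suspend sequence, retrying a limited number of
 * times with increasing delay while the VASI stream remains in the
 * Suspending state.
 */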
static int pseries_suspend(u64 handle)
{
	const unsigned int max_attempts = 5;
	unsigned int retry_interval_ms = 1;
	unsigned int attempt = 1;
	int ret;

	while (true) {
		struct pseries_suspend_info info;
		unsigned long vasi_state;
		int vasi_err;

		info = (struct pseries_suspend_info) {
			.counter = ATOMIC_INIT(0),
			.done = false,
		};

		ret = stop_machine(do_join, &info, cpu_online_mask);
		if (ret == 0)
			break;
		/*
		 * Encountered an error. If the VASI stream is still
		 * in Suspending state, it's likely a transient
		 * condition related to some device in the partition
		 * and we can retry in the hope that the cause has
		 * cleared after some delay.
		 *
		 * A better design would allow drivers etc to prepare
		 * for the suspend and avoid conditions which prevent
		 * the suspend from succeeding. For now, we have this
		 * mitigation.
		 */
		pr_notice("Partition suspend attempt %u of %u error: %d\n",
			  attempt, max_attempts, ret);

		if (attempt == max_attempts)
			break;

		vasi_err = poll_vasi_state(handle, &vasi_state);
		if (vasi_err == 0) {
			if (vasi_state != H_VASI_SUSPENDING) {
				pr_notice("VASI state %lu after failed suspend\n",
					  vasi_state);
				break;
			}
		} else if (vasi_err != -EOPNOTSUPP) {
			pr_err("VASI state poll error: %d\n", vasi_err);
			break;
		}

		pr_notice("Will retry partition suspend after %u ms\n",
			  retry_interval_ms);

		msleep(retry_interval_ms);
		retry_interval_ms *= 10;
		attempt++;
	}

	return ret;
}

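/*
 * Drive a complete migration: wait for the VASI stream to enter the
 * Suspending state, suspend the partition, and then either run the
 * post-migration fixups or cancel the migration on error.
 */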
static int pseries_migrate_partition(u64 handle)
{
	int ret;

	ret = wait_for_vasi_session_suspending(handle);
	if (ret)
		return ret;

	ret = pseries_suspend(handle);
	if (ret == 0)
		post_mobility_fixup();
	else
		pseries_cancel_migration(handle, ret);

	return ret;
}

int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
{
	return pseries_migrate_partition(handle);
}

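/*
 * sysfs write handler for /sys/kernel/mobility/migration; the value
 * written is the VASI stream id of the migration to perform.
 */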
static ssize_t migration_store(struct class *class,
			       struct class_attribute *attr, const char *buf,
			       size_t count)
{
	u64 streamid;
	int rc;

	rc = kstrtou64(buf, 0, &streamid);
	if (rc)
		return rc;

	rc = pseries_migrate_partition(streamid);
	if (rc)
		return rc;

	return count;
}

/*
 * Used by drmgr to determine the kernel behavior of the migration interface.
 *
 * Version 1: Performs all PAPR requirements for migration including
 * firmware activation and device tree update.
 */
#define MIGRATION_API_VERSION	1

static CLASS_ATTR_WO(migration);
static CLASS_ATTR_STRING(api_version, 0444, __stringify(MIGRATION_API_VERSION));

static int __init mobility_sysfs_init(void)
{
	int rc;

	mobility_kobj = kobject_create_and_add("mobility", kernel_kobj);
	if (!mobility_kobj)
		return -ENOMEM;

	rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
	if (rc)
		pr_err("unable to create migration sysfs file (%d)\n", rc);

	rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
	if (rc)
		pr_err("unable to create api_version sysfs file (%d)\n", rc);

	return 0;
}
machine_device_initcall(pseries, mobility_sysfs_init);