#include <nvhe/clock.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trace.h>

#include <asm/kvm_mmu.h>
#include <asm/local.h>

#include <linux/ring_buffer.h>

#define HYP_RB_PAGE_HEAD		1UL
#define HYP_RB_PAGE_UPDATE		2UL
#define HYP_RB_FLAG_MASK		3UL

static struct hyp_buffer_pages_backing hyp_buffer_pages_backing;
DEFINE_PER_CPU(struct hyp_rb_per_cpu, trace_rb);
DEFINE_HYP_SPINLOCK(trace_rb_lock);

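/*
 * Ring-buffer page flags (HYP_RB_PAGE_HEAD / HYP_RB_PAGE_UPDATE) live in the
 * low bits of the ->list.next pointer of each buffer page. Atomically replace
 * the current flag bits of @bpage with @new_flag. Returns false if
 * ->list.next changed under us, i.e. the flags were concurrently updated.
 */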
static bool rb_set_flag(struct hyp_buffer_page *bpage, int new_flag)
{
	unsigned long ret, val = (unsigned long)bpage->list.next;

	ret = cmpxchg((unsigned long *)&bpage->list.next,
		      val, (val & ~HYP_RB_FLAG_MASK) | new_flag);

	return ret == val;
}

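/*
 * Mirror the page status into the extended footer of the shared data page,
 * in either the reader-owned or the writer-owned status field.
 */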
static void rb_set_footer_status(struct hyp_buffer_page *bpage,
				 unsigned long status,
				 bool reader)
{
	struct buffer_data_page *page = bpage->page;
	struct rb_ext_page_footer *footer;

	footer = rb_ext_page_get_footer(page);

	if (reader)
		atomic_set(&footer->reader_status, status);
	else
		atomic_set(&footer->writer_status, status);
}

static void rb_footer_writer_status(struct hyp_buffer_page *bpage,
				    unsigned long status)
{
	rb_set_footer_status(bpage, status, false);
}

static void rb_footer_reader_status(struct hyp_buffer_page *bpage,
				    unsigned long status)
{
	rb_set_footer_status(bpage, status, true);
}

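/* Strip the flag bits from a ->list pointer and return the enclosing page */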
static struct hyp_buffer_page *rb_hyp_buffer_page(struct list_head *list)
{
	unsigned long ptr = (unsigned long)list & ~HYP_RB_FLAG_MASK;

	return container_of((struct list_head *)ptr, struct hyp_buffer_page, list);
}

static struct hyp_buffer_page *rb_next_page(struct hyp_buffer_page *bpage)
{
	return rb_hyp_buffer_page(bpage->list.next);
}

static bool rb_is_head_page(struct hyp_buffer_page *bpage)
{
	return (unsigned long)bpage->list.prev->next & HYP_RB_PAGE_HEAD;
}

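/*
 * Walk the ring from the cached head page until the page flagged
 * HYP_RB_PAGE_HEAD is found, cache it and update the reader footers.
 * Give up after a few full passes if the writer keeps moving the head.
 */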
static struct hyp_buffer_page *rb_set_head_page(struct hyp_rb_per_cpu *cpu_buffer)
{
	struct hyp_buffer_page *bpage, *prev_head;
	int cnt = 0;
again:
	bpage = prev_head = cpu_buffer->head_page;
	do {
		if (rb_is_head_page(bpage)) {
			cpu_buffer->head_page = bpage;
			rb_footer_reader_status(prev_head, 0);
			rb_footer_reader_status(bpage, RB_PAGE_FT_HEAD);
			return bpage;
		}

		bpage = rb_next_page(bpage);
	} while (bpage != prev_head);

	cnt++;

	/* We might have raced with the writer, let's try again */
	if (cnt < 3)
		goto again;

	return NULL;
}

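/*
 * Swap the reader page with the current head page: splice the reader page in
 * place of the head, retrying if the writer moves the head underneath us, and
 * export the overrun count in the footer of the page handed to the reader.
 */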
static int rb_swap_reader_page(struct hyp_rb_per_cpu *cpu_buffer)
{
	unsigned long *old_head_link, old_link_val, new_link_val, overrun;
	struct hyp_buffer_page *head, *reader = cpu_buffer->reader_page;
	struct rb_ext_page_footer *footer;

	rb_footer_reader_status(cpu_buffer->reader_page, 0);
spin:
	/* Update cpu_buffer->head_page according to HYP_RB_PAGE_HEAD */
	head = rb_set_head_page(cpu_buffer);
	if (!head)
		return -ENODEV;

	/* Connect the reader page around the head page */
	reader->list.next = head->list.next;
	reader->list.prev = head->list.prev;

	/* The reader page points to the new head page */
	rb_set_flag(reader, HYP_RB_PAGE_HEAD);

	/*
	 * Paired with the cmpxchg in rb_move_tail(). Order the read of the head
	 * page and overrun.
	 */
	smp_mb();
	overrun = atomic_read(&cpu_buffer->overrun);

	/* Try to swap the prev head link to the reader page */
	old_head_link = (unsigned long *)&reader->list.prev->next;
	old_link_val = (*old_head_link & ~HYP_RB_FLAG_MASK) | HYP_RB_PAGE_HEAD;
	new_link_val = (unsigned long)&reader->list;
	if (cmpxchg(old_head_link, old_link_val, new_link_val)
		      != old_link_val)
		goto spin;

	cpu_buffer->head_page = rb_hyp_buffer_page(reader->list.next);
	cpu_buffer->head_page->list.prev = &reader->list;
	cpu_buffer->reader_page = head;

	rb_footer_reader_status(cpu_buffer->reader_page, RB_PAGE_FT_READER);
	rb_footer_reader_status(cpu_buffer->head_page, RB_PAGE_FT_HEAD);

	footer = rb_ext_page_get_footer(cpu_buffer->reader_page->page);
	footer->stats.overrun = overrun;

	return 0;
}

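/*
 * Advance the tail (writer) page. If the writer has caught up with the head,
 * push the head forward too, adding the overwritten entries to ->overrun.
 * The new tail page is reset and returned.
 */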
static struct hyp_buffer_page *
rb_move_tail(struct hyp_rb_per_cpu *cpu_buffer)
{
	struct hyp_buffer_page *tail_page, *new_tail, *new_head;

	tail_page = cpu_buffer->tail_page;
	new_tail = rb_next_page(tail_page);
again:
	/*
	 * We caught the reader ... Let's try to move the head page.
	 * The writer can only rely on ->next links to check if this is head.
	 */
	if ((unsigned long)tail_page->list.next & HYP_RB_PAGE_HEAD) {
		/* The reader moved the head in between */
		if (!rb_set_flag(tail_page, HYP_RB_PAGE_UPDATE))
			goto again;

		atomic_add(atomic_read(&new_tail->entries), &cpu_buffer->overrun);

		/* Move the head */
		rb_set_flag(new_tail, HYP_RB_PAGE_HEAD);

		/* The new head is in place, reset the update flag */
		rb_set_flag(tail_page, 0);

		new_head = rb_next_page(new_tail);
	}

	rb_footer_writer_status(tail_page, 0);
	rb_footer_writer_status(new_tail, RB_PAGE_FT_COMMIT);

	local_set(&new_tail->page->commit, 0);

	atomic_set(&new_tail->write, 0);
	atomic_set(&new_tail->entries, 0);

	atomic_inc(&cpu_buffer->pages_touched);

	cpu_buffer->tail_page = new_tail;

	return new_tail;
}

unsigned long rb_event_size(unsigned long length)
{
	struct ring_buffer_event *event;

	return length + RB_EVNT_HDR_SIZE + sizeof(event->array[0]);
}

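/*
 * Write a TIME_EXTEND event carrying the full timestamp delta and return the
 * slot immediately after it (a time-extend event is 8 bytes).
 */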
static struct ring_buffer_event *
rb_add_ts_extend(struct ring_buffer_event *event, u64 delta)
{
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
	event->time_delta = delta & TS_MASK;
	event->array[0] = delta >> TS_SHIFT;

	return (struct ring_buffer_event *)((unsigned long)event + 8);
}

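/*
 * Reserve space for an event of @length bytes on the current tail page,
 * moving the tail first if the page is full. A TIME_EXTEND event is
 * prepended when the timestamp delta does not fit in the event header.
 */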
static struct ring_buffer_event *
rb_reserve_next(struct hyp_rb_per_cpu *cpu_buffer, unsigned long length)
{
	unsigned long ts_ext_size = 0, event_size = rb_event_size(length);
	struct hyp_buffer_page *tail_page = cpu_buffer->tail_page;
	struct ring_buffer_event *event;
	unsigned long write, prev_write;
	u64 ts, time_delta;

	ts = trace_clock();

	time_delta = ts - atomic64_read(&cpu_buffer->write_stamp);

	if (test_time_stamp(time_delta))
		ts_ext_size = 8;

	prev_write = atomic_read(&tail_page->write);
	write = prev_write + event_size + ts_ext_size;

	if (unlikely(write > BUF_EXT_PAGE_SIZE))
		tail_page = rb_move_tail(cpu_buffer);

	if (!atomic_read(&tail_page->entries)) {
		tail_page->page->time_stamp = ts;
		time_delta = 0;
		ts_ext_size = 0;
		write = event_size;
		prev_write = 0;
	}

	atomic_set(&tail_page->write, write);
	atomic_inc(&tail_page->entries);

	local_set(&tail_page->page->commit, write);

	atomic_inc(&cpu_buffer->nr_entries);
	atomic64_set(&cpu_buffer->write_stamp, ts);

	event = (struct ring_buffer_event *)(tail_page->page->data +
					     prev_write);
	if (ts_ext_size) {
		event = rb_add_ts_extend(event, time_delta);
		time_delta = 0;
	}

	event->type_len = 0;
	event->time_delta = time_delta;
	event->array[0] = event_size - RB_EVNT_HDR_SIZE;

	return event;
}

void *
rb_reserve_trace_entry(struct hyp_rb_per_cpu *cpu_buffer, unsigned long length)
{
	struct ring_buffer_event *rb_event;

	rb_event = rb_reserve_next(cpu_buffer, length);

	return &rb_event->array[1];
}

static int rb_update_footers(struct hyp_rb_per_cpu *cpu_buffer)
{
	unsigned long entries, pages_touched, overrun;
	struct rb_ext_page_footer *footer;
	struct buffer_data_page *reader;

	if (!rb_set_head_page(cpu_buffer))
		return -ENODEV;

	reader = cpu_buffer->reader_page->page;
	footer = rb_ext_page_get_footer(reader);

	entries = atomic_read(&cpu_buffer->nr_entries);
	footer->stats.entries = entries;
	pages_touched = atomic_read(&cpu_buffer->pages_touched);
	footer->stats.pages_touched = pages_touched;
	overrun = atomic_read(&cpu_buffer->overrun);
	footer->stats.overrun = overrun;

	return 0;
}

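/*
 * Pin a page shared by the host and hook it up to a hyp_buffer_page, with
 * the write counter and footer statuses cleared.
 */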
static int rb_page_init(struct hyp_buffer_page *bpage, unsigned long hva)
{
	void *hyp_va = (void *)kern_hyp_va(hva);
	int ret;

	ret = hyp_pin_shared_mem(hyp_va, hyp_va + PAGE_SIZE);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&bpage->list);
	bpage->page = (struct buffer_data_page *)hyp_va;

	atomic_set(&bpage->write, 0);

	rb_footer_reader_status(bpage, 0);
	rb_footer_writer_status(bpage, 0);

	return 0;
}

static bool rb_cpu_loaded(struct hyp_rb_per_cpu *cpu_buffer)
{
	return cpu_buffer->bpages;
}

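/*
 * Mark the per-CPU buffer non-writable, waiting for any in-flight writer
 * (HYP_RB_WRITING) to finish, then sync the footers one last time.
 */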
static void rb_cpu_disable(struct hyp_rb_per_cpu *cpu_buffer)
{
	unsigned int prev_status;

	/* Wait for release of the buffer */
	do {
		/* Paired with __stop_write_hyp_rb */
		prev_status = atomic_cmpxchg_acquire(&cpu_buffer->status,
						     HYP_RB_WRITABLE,
						     HYP_RB_NONWRITABLE);
	} while (prev_status == HYP_RB_WRITING);

	if (prev_status == HYP_RB_WRITABLE)
		rb_update_footers(cpu_buffer);
}

static int rb_cpu_enable(struct hyp_rb_per_cpu *cpu_buffer)
{
	unsigned int prev_status;

	if (!rb_cpu_loaded(cpu_buffer))
		return -EINVAL;

	prev_status = atomic_cmpxchg(&cpu_buffer->status,
				     HYP_RB_NONWRITABLE, HYP_RB_WRITABLE);

	if (prev_status == HYP_RB_NONWRITABLE)
		return 0;

	return -EINVAL;
}

static void rb_cpu_teardown(struct hyp_rb_per_cpu *cpu_buffer)
{
	int i;

	if (!rb_cpu_loaded(cpu_buffer))
		return;

	rb_cpu_disable(cpu_buffer);

	for (i = 0; i < cpu_buffer->nr_pages; i++) {
		struct hyp_buffer_page *bpage = &cpu_buffer->bpages[i];

		if (!bpage->page)
			continue;

		hyp_unpin_shared_mem((void *)bpage->page,
				     (void *)bpage->page + PAGE_SIZE);
	}

	cpu_buffer->bpages = NULL;
}

static bool rb_cpu_fits_backing(unsigned long nr_pages,
				struct hyp_buffer_page *start)
{
	unsigned long max = hyp_buffer_pages_backing.start +
			    hyp_buffer_pages_backing.size;
	struct hyp_buffer_page *end = start + nr_pages;

	return (unsigned long)end <= max;
}

static bool rb_cpu_fits_pack(struct ring_buffer_pack *rb_pack,
			     unsigned long pack_end)
{
	unsigned long *end;

	/* Check we can at least read nr_pages */
	if ((unsigned long)&rb_pack->nr_pages >= pack_end)
		return false;

	end = &rb_pack->page_va[rb_pack->nr_pages];

	return (unsigned long)end <= pack_end;
}

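/*
 * Build the per-CPU ring out of the host-provided ring_buffer_pack: the first
 * backing page becomes the reader page, the remaining pages are linked into a
 * closed ring whose head is the page right after the reader (flagged via its
 * predecessor's ->next).
 */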
static int rb_cpu_init(struct ring_buffer_pack *rb_pack, struct hyp_buffer_page *start,
		       struct hyp_rb_per_cpu *cpu_buffer)
{
	struct hyp_buffer_page *bpage = start;
	int i, ret;

	if (!rb_pack->nr_pages ||
	    !rb_cpu_fits_backing(rb_pack->nr_pages + 1, start))
		return -EINVAL;

	memset(cpu_buffer, 0, sizeof(*cpu_buffer));

	cpu_buffer->bpages = start;
	cpu_buffer->nr_pages = rb_pack->nr_pages + 1;

	/* The reader page is not part of the ring initially */
	ret = rb_page_init(bpage, rb_pack->reader_page_va);
	if (ret)
		return ret;

	cpu_buffer->reader_page = bpage;
	cpu_buffer->tail_page = bpage + 1;
	cpu_buffer->head_page = bpage + 1;

	for (i = 0; i < rb_pack->nr_pages; i++) {
		ret = rb_page_init(++bpage, rb_pack->page_va[i]);
		if (ret)
			goto err;

		bpage->list.next = &(bpage + 1)->list;
		bpage->list.prev = &(bpage - 1)->list;
	}

	/* Close the ring */
	bpage->list.next = &cpu_buffer->tail_page->list;
	cpu_buffer->tail_page->list.prev = &bpage->list;

	/* The last init'ed page points to the head page */
	rb_set_flag(bpage, HYP_RB_PAGE_HEAD);

	rb_footer_reader_status(cpu_buffer->reader_page, RB_PAGE_FT_READER);
	rb_footer_reader_status(cpu_buffer->head_page, RB_PAGE_FT_HEAD);
	rb_footer_writer_status(cpu_buffer->head_page, RB_PAGE_FT_COMMIT);

	atomic_set(&cpu_buffer->overrun, 0);
	atomic64_set(&cpu_buffer->write_stamp, 0);

	return 0;
err:
	rb_cpu_teardown(cpu_buffer);

	return ret;
}

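/*
 * Take ownership of the host-donated area that backs the hyp_buffer_page
 * array. Only one backing area can be installed at a time.
 */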
static int rb_setup_bpage_backing(struct hyp_trace_pack *pack)
{
	unsigned long start = kern_hyp_va(pack->backing.start);
	size_t size = pack->backing.size;
	int ret;

	if (hyp_buffer_pages_backing.size)
		return -EBUSY;

	if (!PAGE_ALIGNED(start) || !PAGE_ALIGNED(size))
		return -EINVAL;

	ret = __pkvm_host_donate_hyp(hyp_virt_to_pfn((void *)start), size >> PAGE_SHIFT);
	if (ret)
		return ret;

	memset((void *)start, 0, size);

	hyp_buffer_pages_backing.start = start;
	hyp_buffer_pages_backing.size = size;

	return 0;
}

static void rb_teardown_bpage_backing(void)
{
	unsigned long start = hyp_buffer_pages_backing.start;
	size_t size = hyp_buffer_pages_backing.size;

	if (!size)
		return;

	memset((void *)start, 0, size);

	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(start), size >> PAGE_SHIFT));

	hyp_buffer_pages_backing.start = 0;
	hyp_buffer_pages_backing.size = 0;
}

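/* Refresh the stats exported in the reader page footer of @cpu's buffer */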
int __pkvm_rb_update_footers(int cpu)
{
	struct hyp_rb_per_cpu *cpu_buffer;
	int ret = 0;

	if (cpu >= hyp_nr_cpus)
		return -EINVAL;

	/* TODO: per-CPU lock for */
	hyp_spin_lock(&trace_rb_lock);

	cpu_buffer = per_cpu_ptr(&trace_rb, cpu);

	if (!rb_cpu_loaded(cpu_buffer))
		ret = -ENODEV;
	else
		ret = rb_update_footers(cpu_buffer);

	hyp_spin_unlock(&trace_rb_lock);

	return ret;
}

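/* Hand a fresh reader page to the host by swapping it with the current head */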
int __pkvm_rb_swap_reader_page(int cpu)
{
	struct hyp_rb_per_cpu *cpu_buffer;
	int ret = 0;

	if (cpu >= hyp_nr_cpus)
		return -EINVAL;

	/* TODO: per-CPU lock for */
	hyp_spin_lock(&trace_rb_lock);

	cpu_buffer = per_cpu_ptr(&trace_rb, cpu);

	if (!rb_cpu_loaded(cpu_buffer))
		ret = -ENODEV;
	else
		ret = rb_swap_reader_page(cpu_buffer);

	hyp_spin_unlock(&trace_rb_lock);

	return ret;
}

static void __pkvm_teardown_tracing_locked(void)
{
	int cpu;

	hyp_assert_lock_held(&trace_rb_lock);

	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
		struct hyp_rb_per_cpu *cpu_buffer = per_cpu_ptr(&trace_rb, cpu);

		rb_cpu_teardown(cpu_buffer);
	}

	rb_teardown_bpage_backing();
}

void __pkvm_teardown_tracing(void)
{
	hyp_spin_lock(&trace_rb_lock);
	__pkvm_teardown_tracing_locked();
	hyp_spin_unlock(&trace_rb_lock);
}

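/*
 * Load the tracing configuration donated by the host: take the hyp_trace_pack
 * pages, set up the buffer-page backing, the trace clock and one ring buffer
 * per CPU described in the pack, then give the pack pages back to the host.
 */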
int __pkvm_load_tracing(unsigned long pack_hva, size_t pack_size)
{
	struct hyp_trace_pack *pack = (struct hyp_trace_pack *)kern_hyp_va(pack_hva);
	struct trace_buffer_pack *trace_pack = &pack->trace_buffer_pack;
	struct hyp_buffer_page *bpage_backing_start;
	struct ring_buffer_pack *rb_pack;
	int ret, cpu;

	if (!pack_size || !PAGE_ALIGNED(pack_hva) || !PAGE_ALIGNED(pack_size))
		return -EINVAL;

	ret = __pkvm_host_donate_hyp(hyp_virt_to_pfn((void *)pack),
				     pack_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	hyp_spin_lock(&trace_rb_lock);

	ret = rb_setup_bpage_backing(pack);
	if (ret)
		goto err;

	trace_clock_update(&pack->trace_clock_data);

	bpage_backing_start = (struct hyp_buffer_page *)hyp_buffer_pages_backing.start;

	for_each_ring_buffer_pack(rb_pack, cpu, trace_pack) {
		struct hyp_rb_per_cpu *cpu_buffer;
		int cpu;

		ret = -EINVAL;
		if (!rb_cpu_fits_pack(rb_pack, pack_hva + pack_size))
			break;

		cpu = rb_pack->cpu;
		if (cpu >= hyp_nr_cpus)
			break;

		cpu_buffer = per_cpu_ptr(&trace_rb, cpu);

		ret = rb_cpu_init(rb_pack, bpage_backing_start, cpu_buffer);
		if (ret)
			break;

		/* reader page + nr pages in rb */
		bpage_backing_start += 1 + rb_pack->nr_pages;
	}
err:
	if (ret)
		__pkvm_teardown_tracing_locked();

	hyp_spin_unlock(&trace_rb_lock);

	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn((void *)pack),
				       pack_size >> PAGE_SHIFT));
	return ret;
}

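/*
 * Flip all loaded per-CPU buffers (non-)writable. Enabling succeeds if at
 * least one CPU buffer could be switched to writable.
 */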
int __pkvm_enable_tracing(bool enable)
{
	int cpu, ret = enable ? -EINVAL : 0;

	hyp_spin_lock(&trace_rb_lock);
	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
		struct hyp_rb_per_cpu *cpu_buffer = per_cpu_ptr(&trace_rb, cpu);

		if (enable) {
			int __ret = rb_cpu_enable(cpu_buffer);

			if (!__ret)
				ret = 0;
		} else {
			rb_cpu_disable(cpu_buffer);
		}
	}
	hyp_spin_unlock(&trace_rb_lock);

	return ret;
}