// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google LLC
 * Author: Vincent Donnefort <vdonnefort@google.com>
 */

#include <nvhe/alloc.h>
#include <nvhe/clock.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trace.h>

#include <asm/percpu.h>
#include <asm/kvm_mmu.h>
#include <asm/local.h>

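/*
 * Flags stored in the two low bits of a page ->list.next pointer: the link
 * pointing at the head page carries HYP_RB_PAGE_HEAD, and HYP_RB_PAGE_UPDATE
 * marks a head move in progress by the writer.
 */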
#define HYP_RB_PAGE_HEAD	1UL
#define HYP_RB_PAGE_UPDATE	2UL
#define HYP_RB_FLAG_MASK	3UL

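/* Hypervisor-side state for one data page shared with the host. */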
struct hyp_buffer_page {
	struct list_head list;
	struct buffer_data_page *page;
	unsigned long write;
	unsigned long entries;
	u32 id;
};

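/*
 * Per-CPU ring buffer: loading, reset and reader swaps are serialized by
 * trace_rb_lock, while the writer synchronizes through ->status only.
 */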
struct hyp_rb_per_cpu {
	struct trace_buffer_meta *meta;
	struct hyp_buffer_page *tail_page;
	struct hyp_buffer_page *reader_page;
	struct hyp_buffer_page *head_page;
	struct hyp_buffer_page *bpages;
	unsigned long nr_pages;
	unsigned long last_overrun;
	u64 write_stamp;
	atomic_t status;
};

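/*
 * ->status values: UNAVAILABLE (writing disabled), READY (events can be
 * reserved), WRITING (an event is being reserved or committed) and PANIC
 * (buffer frozen for a last host read after a hypervisor panic).
 */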
#define HYP_RB_UNAVAILABLE	0
#define HYP_RB_READY		1
#define HYP_RB_WRITING		2
#define HYP_RB_PANIC		3

DEFINE_PER_CPU(struct hyp_rb_per_cpu, trace_rb);
DEFINE_HYP_SPINLOCK(trace_rb_lock);

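/*
 * Atomically replace the flag bits encoded in @bpage's ->next link. Returns
 * false if the link changed under us, e.g. because of a concurrent reader
 * page swap.
 */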
static bool rb_set_flag(struct hyp_buffer_page *bpage, int new_flag)
{
	unsigned long ret, val = (unsigned long)bpage->list.next;

	ret = cmpxchg((unsigned long *)&bpage->list.next,
		      val, (val & ~HYP_RB_FLAG_MASK) | new_flag);

	return ret == val;
}

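/* Mask out the flag bits and return the hyp_buffer_page embedding this node. */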
static struct hyp_buffer_page *rb_hyp_buffer_page(struct list_head *list)
{
	unsigned long ptr = (unsigned long)list & ~HYP_RB_FLAG_MASK;

	return container_of((struct list_head *)ptr, struct hyp_buffer_page, list);
}

static struct hyp_buffer_page *rb_next_page(struct hyp_buffer_page *bpage)
{
	return rb_hyp_buffer_page(bpage->list.next);
}

static bool rb_is_head_page(struct hyp_buffer_page *bpage)
{
	return (unsigned long)bpage->list.prev->next & HYP_RB_PAGE_HEAD;
}

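/*
 * Find the page whose incoming ->next link carries HYP_RB_PAGE_HEAD and cache
 * it in cpu_buffer->head_page. The flag can move under us, hence the bounded
 * retry.
 */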
static struct hyp_buffer_page *rb_set_head_page(struct hyp_rb_per_cpu *cpu_buffer)
{
	struct hyp_buffer_page *bpage, *prev_head;
	int cnt = 0;

again:
	bpage = prev_head = cpu_buffer->head_page;
	do {
		if (rb_is_head_page(bpage)) {
			cpu_buffer->head_page = bpage;
			return bpage;
		}

		bpage = rb_next_page(bpage);
	} while (bpage != prev_head);

	/* We might have raced with the writer, let's try again */
	if (++cnt < 3)
		goto again;

	return NULL;
}

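/*
 * Swap the reader page with the current head page so the host can consume it:
 * splice the reader page in front of the head, then retry the head-link
 * cmpxchg until it wins against a concurrent writer moving the head.
 */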
static int rb_swap_reader_page(struct hyp_rb_per_cpu *cpu_buffer)
{
	unsigned long *old_head_link, old_link_val, new_link_val, overrun;
	struct hyp_buffer_page *head, *reader = cpu_buffer->reader_page;

spin:
	/* Update cpu_buffer->head_page according to HYP_RB_PAGE_HEAD */
	head = rb_set_head_page(cpu_buffer);
	if (!head)
		return -ENODEV;

	/* Connect the reader page around the head page */
	reader->list.next = head->list.next;
	reader->list.prev = head->list.prev;

	/* The reader page points to the new head page */
	rb_set_flag(reader, HYP_RB_PAGE_HEAD);

	/*
	 * Paired with the cmpxchg in rb_move_tail(). Order the read of the head
	 * page and overrun.
	 */
	smp_mb();
	overrun = READ_ONCE(cpu_buffer->meta->overrun);

	/* Try to swap the prev head link to the reader page */
	old_head_link = (unsigned long *)&reader->list.prev->next;
	old_link_val = (*old_head_link & ~HYP_RB_FLAG_MASK) | HYP_RB_PAGE_HEAD;
	new_link_val = (unsigned long)&reader->list;
	if (cmpxchg(old_head_link, old_link_val, new_link_val)
	    != old_link_val)
		goto spin;

	cpu_buffer->head_page = rb_hyp_buffer_page(reader->list.next);
	cpu_buffer->head_page->list.prev = &reader->list;
	cpu_buffer->reader_page = head;
	cpu_buffer->meta->reader.lost_events = overrun - cpu_buffer->last_overrun;
	cpu_buffer->meta->reader.id = cpu_buffer->reader_page->id;
	cpu_buffer->last_overrun = overrun;

	return 0;
}

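/*
 * Move the writer to the next page. If that page is the current head (i.e.
 * the reader has not consumed it yet), push the head forward and account the
 * dropped entries as overrun.
 */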
static struct hyp_buffer_page *
rb_move_tail(struct hyp_rb_per_cpu *cpu_buffer)
{
	struct hyp_buffer_page *tail_page, *new_tail, *new_head;

	tail_page = cpu_buffer->tail_page;
	new_tail = rb_next_page(tail_page);

again:
	/*
	 * We caught the reader ... Let's try to move the head page.
	 * The writer can only rely on ->next links to check if this is head.
	 */
	if ((unsigned long)tail_page->list.next & HYP_RB_PAGE_HEAD) {
		/* The reader moved the head in between */
		if (!rb_set_flag(tail_page, HYP_RB_PAGE_UPDATE))
			goto again;

		WRITE_ONCE(cpu_buffer->meta->overrun,
			   cpu_buffer->meta->overrun + new_tail->entries);
		WRITE_ONCE(meta_pages_lost(cpu_buffer->meta),
			   meta_pages_lost(cpu_buffer->meta) + 1);

		/* Move the head */
		rb_set_flag(new_tail, HYP_RB_PAGE_HEAD);

		/* The new head is in place, reset the update flag */
		rb_set_flag(tail_page, 0);

		new_head = rb_next_page(new_tail);
	}

	local_set(&new_tail->page->commit, 0);

	new_tail->write = 0;
	new_tail->entries = 0;

	WRITE_ONCE(meta_pages_touched(cpu_buffer->meta),
		   meta_pages_touched(cpu_buffer->meta) + 1);
	cpu_buffer->tail_page = new_tail;

	return new_tail;
}

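/* Bytes needed on the page for a @length payload: header plus length word. */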
static unsigned long rb_event_size(unsigned long length)
{
	struct ring_buffer_event *event;

	return length + RB_EVNT_HDR_SIZE + sizeof(event->array[0]);
}

static struct ring_buffer_event *
rb_add_ts_extend(struct ring_buffer_event *event, u64 delta)
{
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
	event->time_delta = delta & TS_MASK;
	event->array[0] = delta >> TS_SHIFT;

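	/* The data event starts right after this 8-byte TIME_EXTEND event */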
	return (struct ring_buffer_event *)((unsigned long)event + 8);
}

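/*
 * Reserve space for a @length bytes event on the current tail page, preceded
 * by a TIME_EXTEND event whenever the timestamp delta does not fit in the
 * time_delta field of the event header.
 */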
static struct ring_buffer_event *
rb_reserve_next(struct hyp_rb_per_cpu *cpu_buffer, unsigned long length)
{
	unsigned long ts_ext_size = 0, event_size = rb_event_size(length);
	struct hyp_buffer_page *tail_page = cpu_buffer->tail_page;
	struct ring_buffer_event *event;
	unsigned long write, prev_write;
	u64 ts, time_delta;

	ts = trace_clock();

	time_delta = ts - cpu_buffer->write_stamp;

	if (test_time_stamp(time_delta))
		ts_ext_size = 8;

	prev_write = tail_page->write;
	write = prev_write + event_size + ts_ext_size;

	if (unlikely(write > BUF_PAGE_SIZE))
		tail_page = rb_move_tail(cpu_buffer);

	if (!tail_page->entries) {
		tail_page->page->time_stamp = ts;
		time_delta = 0;
		ts_ext_size = 0;
		write = event_size;
		prev_write = 0;
	}

	tail_page->write = write;
	tail_page->entries++;

	cpu_buffer->write_stamp = ts;

	event = (struct ring_buffer_event *)(tail_page->page->data +
					     prev_write);
	if (ts_ext_size) {
		event = rb_add_ts_extend(event, time_delta);
		time_delta = 0;
	}

	event->type_len = 0;
	event->time_delta = time_delta;
	event->array[0] = event_size - RB_EVNT_HDR_SIZE;

	return event;
}

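/*
 * Reserve an entry of @length bytes in this CPU's ring buffer and return a
 * pointer to its payload, or NULL if the buffer is unavailable. A successful
 * reservation must be followed by tracing_commit_entry().
 */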
void *tracing_reserve_entry(unsigned long length)
{
	struct hyp_rb_per_cpu *cpu_buffer = this_cpu_ptr(&trace_rb);
	struct ring_buffer_event *rb_event;

	if (atomic_cmpxchg(&cpu_buffer->status, HYP_RB_READY, HYP_RB_WRITING)
	    != HYP_RB_READY)
		return NULL;

	rb_event = rb_reserve_next(cpu_buffer, length);

	return &rb_event->array[1];
}

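/* Publish the event reserved by tracing_reserve_entry() and release the buffer. */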
void tracing_commit_entry(void)
{
	struct hyp_rb_per_cpu *cpu_buffer = this_cpu_ptr(&trace_rb);

	local_set(&cpu_buffer->tail_page->page->commit,
		  cpu_buffer->tail_page->write);
	WRITE_ONCE(cpu_buffer->meta->entries,
		   cpu_buffer->meta->entries + 1);

	/* Paired with rb_cpu_disable_writing() */
	atomic_set_release(&cpu_buffer->status, HYP_RB_READY);
}

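/* Pin the host-shared page at @hva and attach it to @bpage. */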
static int rb_page_init(struct hyp_buffer_page *bpage, unsigned long hva)
{
	void *hyp_va = (void *)kern_hyp_va(hva);
	int ret;

	ret = hyp_pin_shared_mem(hyp_va, hyp_va + PAGE_SIZE);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&bpage->list);
	bpage->page = (struct buffer_data_page *)hyp_va;

	local_set(&bpage->page->commit, 0);

	return 0;
}

static void rb_page_reset(struct hyp_buffer_page *bpage)
{
	bpage->write = 0;
	bpage->entries = 0;

	local_set(&bpage->page->commit, 0);
}

static bool rb_cpu_loaded(struct hyp_rb_per_cpu *cpu_buffer)
{
	return !!cpu_buffer->bpages;
}

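/*
 * Flip the buffer to HYP_RB_UNAVAILABLE, spinning until any in-flight writer
 * has committed its event. Returns the previous status.
 */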
static int rb_cpu_disable_writing(struct hyp_rb_per_cpu *cpu_buffer)
{
	int prev_status;

	/* Wait for the buffer to be released */
	do {
		prev_status = atomic_cmpxchg_acquire(&cpu_buffer->status,
						     HYP_RB_READY,
						     HYP_RB_UNAVAILABLE);
	} while (prev_status == HYP_RB_WRITING);

	return prev_status;
}

static int rb_cpu_enable_writing(struct hyp_rb_per_cpu *cpu_buffer)
{
	int prev_status;

	if (!rb_cpu_loaded(cpu_buffer))
		return -ENODEV;

	prev_status = atomic_cmpxchg(&cpu_buffer->status, HYP_RB_UNAVAILABLE,
				     HYP_RB_READY);

	return prev_status == HYP_RB_UNAVAILABLE ? 0 : -ENODEV;
}

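/*
 * Drop all recorded events: reset every page and the meta-page counters,
 * preserving whether writing was enabled.
 */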
static int rb_cpu_reset(struct hyp_rb_per_cpu *cpu_buffer)
{
	struct hyp_buffer_page *bpage;
	int prev_status;

	if (!rb_cpu_loaded(cpu_buffer))
		return -ENODEV;

	prev_status = rb_cpu_disable_writing(cpu_buffer);

	if (!rb_set_head_page(cpu_buffer))
		return -ENODEV;

	cpu_buffer->tail_page = cpu_buffer->head_page;

	bpage = cpu_buffer->head_page;
	do {
		rb_page_reset(bpage);
		bpage = rb_next_page(bpage);
	} while (bpage != cpu_buffer->head_page);

	rb_page_reset(cpu_buffer->reader_page);

	cpu_buffer->last_overrun = 0;
	cpu_buffer->write_stamp = 0;

	cpu_buffer->meta->reader.read = 0;
	cpu_buffer->meta->reader.lost_events = 0;
	cpu_buffer->meta->entries = 0;
	cpu_buffer->meta->overrun = 0;
	cpu_buffer->meta->read = 0;
	meta_pages_lost(cpu_buffer->meta) = 0;
	meta_pages_touched(cpu_buffer->meta) = 0;

	if (prev_status == HYP_RB_READY)
		rb_cpu_enable_writing(cpu_buffer);

	return 0;
}

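/* Unpin the meta and data pages shared with the host and free the page array. */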
static void rb_cpu_teardown(struct hyp_rb_per_cpu *cpu_buffer)
{
	int i;

	if (!rb_cpu_loaded(cpu_buffer))
		return;

	rb_cpu_disable_writing(cpu_buffer);

	hyp_unpin_shared_mem((void *)cpu_buffer->meta,
			     (void *)(cpu_buffer->meta) + PAGE_SIZE);

	for (i = 0; i < cpu_buffer->nr_pages; i++) {
		struct hyp_buffer_page *bpage = &cpu_buffer->bpages[i];

		if (!bpage->page)
			continue;

		hyp_unpin_shared_mem((void *)bpage->page,
				     (void *)bpage->page + PAGE_SIZE);
	}

	hyp_free(cpu_buffer->bpages);
	cpu_buffer->bpages = NULL;
}

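/* Check that @pdesc and its page_va[] array fit within the donated descriptor. */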
static bool rb_cpu_fits_desc(struct rb_page_desc *pdesc,
			     unsigned long desc_end)
{
	unsigned long *end;

	/* Check we can at least read nr_pages */
	if ((unsigned long)&pdesc->nr_page_va >= desc_end)
		return false;

	end = &pdesc->page_va[pdesc->nr_page_va];

	return (unsigned long)end <= desc_end;
}

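/*
 * Build the per-CPU ring from the host-provided page descriptor: pin the
 * meta-page and every data page, link the data pages into a ring and flag the
 * initial head. The first page becomes the reader page and stays out of the
 * ring.
 */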
static int rb_cpu_init(struct rb_page_desc *pdesc, struct hyp_rb_per_cpu *cpu_buffer)
{
	struct hyp_buffer_page *bpage;
	int i, ret;

	/* At least 1 reader page and one head */
	if (pdesc->nr_page_va < 2)
		return -EINVAL;

	if (rb_cpu_loaded(cpu_buffer))
		return -EBUSY;

	bpage = hyp_alloc(sizeof(*bpage) * pdesc->nr_page_va);
	if (!bpage)
		return hyp_alloc_errno();
	cpu_buffer->bpages = bpage;

	cpu_buffer->meta = (struct trace_buffer_meta *)kern_hyp_va(pdesc->meta_va);
	ret = hyp_pin_shared_mem((void *)cpu_buffer->meta,
				 ((void *)cpu_buffer->meta) + PAGE_SIZE);
	if (ret) {
		hyp_free(cpu_buffer->bpages);
		return ret;
	}

	memset(cpu_buffer->meta, 0, sizeof(*cpu_buffer->meta));
	cpu_buffer->meta->meta_page_size = PAGE_SIZE;
	cpu_buffer->meta->nr_subbufs = cpu_buffer->nr_pages;

	/* The reader page is not part of the ring initially */
	ret = rb_page_init(bpage, pdesc->page_va[0]);
	if (ret)
		goto err;

	cpu_buffer->nr_pages = 1;

	cpu_buffer->reader_page = bpage;
	cpu_buffer->tail_page = bpage + 1;
	cpu_buffer->head_page = bpage + 1;

	for (i = 1; i < pdesc->nr_page_va; i++) {
		ret = rb_page_init(++bpage, pdesc->page_va[i]);
		if (ret)
			goto err;

		bpage->list.next = &(bpage + 1)->list;
		bpage->list.prev = &(bpage - 1)->list;
		bpage->id = i;

		cpu_buffer->nr_pages = i + 1;
	}

	/* Close the ring */
	bpage->list.next = &cpu_buffer->tail_page->list;
	cpu_buffer->tail_page->list.prev = &bpage->list;

	/* The last init'ed page points to the head page */
	rb_set_flag(bpage, HYP_RB_PAGE_HEAD);

	cpu_buffer->last_overrun = 0;

	return 0;

err:
	rb_cpu_teardown(cpu_buffer);

	return ret;
}

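/*
 * Wait for every CPU to finish writing its current event before switching the
 * trace clock parameters.
 */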
void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc)
{
	int cpu;

	/* After this loop, all CPUs are observing the new bank... */
	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
		struct hyp_rb_per_cpu *cpu_buffer = per_cpu_ptr(&trace_rb, cpu);

		while (atomic_read(&cpu_buffer->status) == HYP_RB_WRITING);
	}

	/* ...we can now override the old one and swap. */
	trace_clock_update(mult, shift, epoch_ns, epoch_cyc);
}

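/* Hand the current head page of @cpu's ring buffer over to the reader. */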
int __pkvm_swap_reader_tracing(unsigned int cpu)
{
	struct hyp_rb_per_cpu *cpu_buffer;
	int ret = 0;

	if (cpu >= hyp_nr_cpus)
		return -EINVAL;

	hyp_spin_lock(&trace_rb_lock);

	cpu_buffer = per_cpu_ptr(&trace_rb, cpu);
	if (!rb_cpu_loaded(cpu_buffer))
		ret = -ENODEV;
	else
		ret = rb_swap_reader_page(cpu_buffer);

	hyp_spin_unlock(&trace_rb_lock);

	return ret;
}

static void __pkvm_teardown_tracing_locked(void)
{
	int cpu;

	hyp_assert_lock_held(&trace_rb_lock);

	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
		struct hyp_rb_per_cpu *cpu_buffer = per_cpu_ptr(&trace_rb, cpu);

		rb_cpu_teardown(cpu_buffer);
	}
}

void __pkvm_teardown_tracing(void)
{
	hyp_spin_lock(&trace_rb_lock);
	__pkvm_teardown_tracing_locked();
	hyp_spin_unlock(&trace_rb_lock);
}

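/*
 * Take ownership of the host-donated descriptor pages, set up a per-CPU ring
 * buffer for each described CPU, then hand the descriptor back to the host.
 */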
int __pkvm_load_tracing(unsigned long desc_hva, size_t desc_size)
{
	struct hyp_trace_desc *desc = (struct hyp_trace_desc *)kern_hyp_va(desc_hva);
	struct trace_page_desc *trace_pdesc = &desc->page_desc;
	struct rb_page_desc *pdesc;
	int ret, pdesc_cpu;

	if (!desc_size || !PAGE_ALIGNED(desc_hva) || !PAGE_ALIGNED(desc_size))
		return -EINVAL;

	ret = __pkvm_host_donate_hyp(hyp_virt_to_pfn((void *)desc),
				     desc_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	hyp_spin_lock(&trace_rb_lock);

	for_each_rb_page_desc(pdesc, pdesc_cpu, trace_pdesc) {
		struct hyp_rb_per_cpu *cpu_buffer;
		unsigned int cpu;

		ret = -EINVAL;
		if (!rb_cpu_fits_desc(pdesc, desc_hva + desc_size))
			break;

		cpu = pdesc->cpu;
		if (cpu >= hyp_nr_cpus)
			break;

		cpu_buffer = per_cpu_ptr(&trace_rb, cpu);

		ret = rb_cpu_init(pdesc, cpu_buffer);
		if (ret)
			break;
	}

	if (ret)
		__pkvm_teardown_tracing_locked();

	hyp_spin_unlock(&trace_rb_lock);

	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn((void *)desc),
				       desc_size >> PAGE_SHIFT));
	return ret;
}

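/*
 * Enable or disable writing on every per-CPU buffer. When enabling, report
 * success if at least one buffer could be enabled.
 */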
int __pkvm_enable_tracing(bool enable)
{
	int cpu, ret = enable ? -EINVAL : 0;

	hyp_spin_lock(&trace_rb_lock);
	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
		struct hyp_rb_per_cpu *cpu_buffer = per_cpu_ptr(&trace_rb, cpu);

		if (enable) {
			if (!rb_cpu_enable_writing(cpu_buffer))
				ret = 0;
		} else {
			rb_cpu_disable_writing(cpu_buffer);
		}
	}
	hyp_spin_unlock(&trace_rb_lock);

	return ret;
}

int __pkvm_reset_tracing(unsigned int cpu)
{
	int ret = 0;

	if (cpu >= hyp_nr_cpus)
		return -EINVAL;

	hyp_spin_lock(&trace_rb_lock);
	ret = rb_cpu_reset(per_cpu_ptr(&trace_rb, cpu));
	hyp_spin_unlock(&trace_rb_lock);

	return ret;
}

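/*
 * On hypervisor panic, freeze every loaded buffer and push the remaining
 * pages through the reader so the host can dump the very last events.
 */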
void __pkvm_panic_tracing(void)
{
#ifdef CONFIG_PKVM_DUMP_TRACE_ON_PANIC
	int cpu;

	hyp_spin_lock(&trace_rb_lock);

	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
		struct hyp_rb_per_cpu *cpu_buffer = per_cpu_ptr(&trace_rb, cpu);
		int prev_status, skipped = 0;

		if (!rb_cpu_loaded(cpu_buffer))
			continue;

		do {
			prev_status = atomic_cmpxchg_acquire(&cpu_buffer->status, HYP_RB_READY,
							     HYP_RB_PANIC);
		} while (prev_status == HYP_RB_WRITING);

		/* Allow the host to read the very last events */
		while (cpu_buffer->tail_page != cpu_buffer->reader_page) {
			struct hyp_buffer_page *prev_reader = cpu_buffer->reader_page;

			if (rb_swap_reader_page(cpu_buffer))
				break;

			/*
			 * The reader is still on the previous reader page and events there can
			 * still be read.
			 */
			if (++skipped == 1)
				continue;

			WRITE_ONCE(cpu_buffer->meta->overrun,
				   cpu_buffer->meta->overrun + prev_reader->entries);
			cpu_buffer->meta->reader.lost_events = cpu_buffer->meta->overrun -
							       cpu_buffer->last_overrun;
			WRITE_ONCE(meta_pages_lost(cpu_buffer->meta),
				   meta_pages_lost(cpu_buffer->meta) + 1);
		}
	}

	hyp_spin_unlock(&trace_rb_lock);
#endif
}