// Copyright 2019 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include <cstdint>
#include <cstring>  // For std::memcpy() and std::memcmp() used below.
#include <type_traits>

#include "gtest/gtest.h"
#include "pw_cpu_exception/entry.h"
#include "pw_cpu_exception/handler.h"
#include "pw_cpu_exception/support.h"
#include "pw_cpu_exception_cortex_m/cpu_state.h"
#include "pw_cpu_exception_cortex_m_private/config.h"
#include "pw_cpu_exception_cortex_m_private/cortex_m_constants.h"
#include "pw_span/span.h"

namespace pw::cpu_exception::cortex_m {
namespace {

using pw::cpu_exception::RawFaultingCpuState;

// CMSIS/Cortex-M/ARMv7 related constants.
// These values are from the ARMv7-M Architecture Reference Manual DDI 0403E.b.
// https://static.docs.arm.com/ddi0403/e/DDI0403E_B_armv7m_arm.pdf

// CCR flags. (ARMv7-M Section B3.2.8)
constexpr uint32_t kUnalignedTrapEnableMask = 0x1u << 3;
constexpr uint32_t kDivByZeroTrapEnableMask = 0x1u << 4;

// Masks for individual bits of SHCSR. (ARMv7-M Section B3.2.13)
constexpr uint32_t kMemFaultEnableMask = 0x1 << 16;
constexpr uint32_t kBusFaultEnableMask = 0x1 << 17;
constexpr uint32_t kUsageFaultEnableMask = 0x1 << 18;

// CPACR mask that enables the FPU. (ARMv7-M Section B3.2.20)
constexpr uint32_t kFpuEnableMask = (0xFu << 20);
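// Setting CPACR bits [23:20] grants full access to coprocessors CP10 and
// CP11, which together implement the Cortex-M floating point unit.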

// Memory mapped registers. (ARMv7-M Section B3.2.2, Table B3-4)
volatile uint32_t& cortex_m_vtor =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED08u);
volatile uint32_t& cortex_m_ccr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED14u);
// SHCSR, CFSR, and HFSR are referenced by the handler below; their addresses
// come from the same SCB register table.
volatile uint32_t& cortex_m_shcsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED24u);
volatile uint32_t& cortex_m_cfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED28u);
volatile uint32_t& cortex_m_hfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED2Cu);
volatile uint32_t& cortex_m_cpacr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED88u);

// Begin a critical section that must not be interrupted.
// This function disables interrupts to prevent any sort of context switch until
// the critical section ends. This is done by setting PRIMASK to 1 using the cps
// instruction.
//
// Returns the state of PRIMASK from before interrupts were disabled.
inline uint32_t BeginCriticalSection() {
  uint32_t previous_state;
  asm volatile(
      " mrs %[previous_state], primask              \n"
      " cpsid i                                     \n"
      // clang-format off
      : /*output=*/[previous_state]"=r"(previous_state)
      : /*input=*/
      : /*clobbers=*/"memory"
      // clang-format on
  );
  return previous_state;
}

// Ends a critical section.
// Restores the previous state produced by BeginCriticalSection().
// Note: This does not always re-enable interrupts.
inline void EndCriticalSection(uint32_t previous_state) {
  asm volatile(
      // clang-format off
      "msr primask, %0"
      : /*output=*/
      : /*input=*/"r"(previous_state)
      : /*clobbers=*/"memory"
      // clang-format on
  );
}
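
// Illustrative usage (not part of the test): the pair protects a
// read-modify-write sequence from being interrupted partway through. The
// register and mask names here are hypothetical.
//
//   uint32_t saved = BeginCriticalSection();
//   some_shared_register |= kSomeBitMask;  // hypothetical
//   EndCriticalSection(saved);  // Restores PRIMASK; only re-enables
//                               // interrupts if they were enabled before.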

void EnableFpu() {
  if (PW_ARMV7M_ENABLE_FPU == 1) {
    cortex_m_cpacr |= kFpuEnableMask;
  }
}

void DisableFpu() {
  if (PW_ARMV7M_ENABLE_FPU == 1) {
    cortex_m_cpacr &= ~kFpuEnableMask;
  }
}

// Counter that is incremented if the test's exception handler correctly handles
// a triggered exception.
size_t exceptions_handled = 0;

// Global variable that triggers a single nested fault on a fault.
bool trigger_nested_fault = false;

// Allow up to kMaxFaultDepth faults before determining the device is
// unrecoverable.
constexpr size_t kMaxFaultDepth = 2;

// Variable to prevent more than kMaxFaultDepth nested crashes.
size_t current_fault_depth = 0;

// The faulting pw_cpu_exception_State is copied here so values can be
// validated after exiting the exception handler.
pw_cpu_exception_State captured_states[kMaxFaultDepth] = {};
pw_cpu_exception_State& captured_state = captured_states[0];

// Flag used to check whether the contents of the raw state span match the
// captured state.
bool span_matches = false;

// Variable to be manipulated by a function that uses floating point, to test
// that exceptions push FPU state correctly.
// Note: don't use double; on a Cortex-M4F with fpv4-sp-d16, GCC generates
// calls to software floating point support for double.
volatile float float_test_value;

// Magic pattern to help identify if the exception handler's
// pw_cpu_exception_State pointer was pointing to captured CPU state that was
// pushed onto the stack when the faulting context uses the VFP. The matching
// runtime value is produced by _PW_TEST_FPU_OPERATION below, whose volatile
// operands force a real FPU multiply rather than a constant-folded result.
const float kFloatTestPattern = 12.345f * 67.89f;

volatile float fpu_lhs_val = 12.345f;
volatile float fpu_rhs_val = 67.89f;

// This macro provides a calculation that equals kFloatTestPattern.
#define _PW_TEST_FPU_OPERATION (fpu_lhs_val * fpu_rhs_val)
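// A macro (rather than a helper function) keeps the FPU multiply inline in
// whichever function triggers the fault, so the live VFP state belongs to the
// faulting context itself and must be stacked on exception entry.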

// Magic pattern to help identify if the exception handler's
// pw_cpu_exception_State pointer was pointing to captured CPU state that was
// pushed onto the stack.
constexpr uint32_t kMagicPattern = 0xDEADBEEF;

// This pattern serves a purpose similar to kMagicPattern, but is used for
// testing a nested fault to ensure both pw_cpu_exception_State objects are
// correctly captured.
constexpr uint32_t kNestedMagicPattern = 0x900DF00D;

// The manually captured PC won't be the exact same as the faulting PC. This is
// the maximum tolerated distance between the two to allow the test to pass.
constexpr int32_t kMaxPcDistance = 4;
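// (In Thumb state, reading the PC with `mov rX, pc` yields the address of the
// current instruction plus 4, so the staged value naturally leads the
// exception-captured PC by a few bytes.)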

// In-memory interrupt service routine vector table.
using InterruptVectorTable = std::aligned_storage_t<512, 512>;
InterruptVectorTable ram_vector_table;
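// Note: VTOR requires the table base to be aligned to a power of two no
// smaller than the table itself (ARMv7-M Section B3.2.5); 512 bytes covers up
// to 128 four-byte vector entries.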

// Forward declaration of the exception handler.
void TestingExceptionHandler(pw_cpu_exception_State*);

// Populate the device's registers with testable values, then trigger exception.
void BeginBaseFaultTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
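  // r0-r3 are used because the exception entry sequence pushes them (along
  // with r12, lr, pc, and xPSR) onto the stack, so the handler can validate
  // them from the captured base state.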
  asm volatile(
      " mov r0, %[magic]                                      \n"
      " mov r1, #0                                            \n"
      " mov r2, pc                                            \n"
      " mov r3, lr                                            \n"
      // This instruction divides by zero.
      " udiv r1, r1, r1                                       \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );

  // Check that the stack align bit was not set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit, 0u);
}

// Populate the device's registers with testable values, then trigger exception.
void BeginNestedFaultTest() {
  // Make sure unaligned accesses cause a fault.
  cortex_m_ccr |= kUnalignedTrapEnableMask;
  volatile uint32_t magic = kNestedMagicPattern;
  asm volatile(
      " mov r0, %[magic]                                      \n"
      " mov r1, #0                                            \n"
      " mov r2, pc                                            \n"
      " mov r3, lr                                            \n"
      // This instruction does an unaligned read.
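      // (Loading a halfword from an odd address faults only because
      // CCR.UNALIGN_TRP was set above; otherwise the core would simply
      // perform the unaligned access.)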
      " ldrh r1, [%[magic_addr], 1]                           \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic), [magic_addr]"r"(&magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );
}

// Populate the device's registers with testable values, then trigger exception.
// This version causes the stack to not be 8-byte aligned initially, testing
// the fault handler's correction for psp.
void BeginBaseFaultUnalignedStackTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  asm volatile(
      // Push one register to cause $sp to be no longer 8-byte aligned,
      // assuming it started 8-byte aligned as expected.
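      // With CCR.STKALIGN set, exception entry realigns the stack to 8 bytes
      // and records the adjustment in xPSR bit 9, which the handler must undo
      // when computing the faulting context's stack pointer.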
      " push {r0}                                             \n"
      " mov r0, %[magic]                                      \n"
      " mov r1, #0                                            \n"
      " mov r2, pc                                            \n"
      " mov r3, lr                                            \n"
      // This instruction divides by zero. Our fault handler should
      // ultimately advance the pc to the pop instruction.
      " udiv r1, r1, r1                                       \n"
      " pop {r0}                                              \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );

  // Check that the stack align bit was set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit,
            kPsrExtraStackAlignBit);
}

// Populate some of the extended set of captured registers, then trigger
// exception.
void BeginExtendedFaultTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  volatile uint32_t local_msp = 0;
  volatile uint32_t local_psp = 0;
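  // r4, r5, and r11 are callee-saved registers that the hardware exception
  // frame does not include; they only appear in the handler's extended
  // capture, which is what this test exercises.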
  asm volatile(
      " mov r4, %[magic]                                      \n"
      " mov r5, #0                                            \n"
      " mov r11, %[magic]                                     \n"
      " mrs %[local_msp], msp                                 \n"
      " mrs %[local_psp], psp                                 \n"
      // This instruction divides by zero.
      " udiv r5, r5, r5                                       \n"
      // clang-format off
      : /*output=*/[local_msp]"=r"(local_msp), [local_psp]"=r"(local_psp)
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r4", "r5", "r11", "memory"
      // clang-format on
  );

  // Check that the stack align bit was not set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit, 0u);

  // Check that the captured stack pointers match the ones in the context of
  // the fault.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.msp), local_msp);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.psp), local_psp);
}

// Populate some of the extended set of captured registers, then trigger
// exception.
// This version causes the stack to not be 8-byte aligned initially, testing
// the fault handler's correction for psp.
void BeginExtendedFaultUnalignedStackTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  volatile uint32_t local_msp = 0;
  volatile uint32_t local_psp = 0;
  asm volatile(
      // Push one register to cause $sp to be no longer 8-byte aligned,
      // assuming it started 8-byte aligned as expected.
      " push {r0}                                             \n"
      " mov r4, %[magic]                                      \n"
      " mov r5, #0                                            \n"
      " mov r11, %[magic]                                     \n"
      " mrs %[local_msp], msp                                 \n"
      " mrs %[local_psp], psp                                 \n"
      // This instruction divides by zero. Our fault handler should
      // ultimately advance the pc to the pop instruction.
      " udiv r5, r5, r5                                       \n"
      " pop {r0}                                              \n"
      // clang-format off
      : /*output=*/[local_msp]"=r"(local_msp), [local_psp]"=r"(local_psp)
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r4", "r5", "r11", "memory"
      // clang-format on
  );

  // Check that the stack align bit was set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit,
            kPsrExtraStackAlignBit);

  // Check that the captured stack pointers match the ones in the context of
  // the fault.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.msp), local_msp);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.psp), local_psp);
}

void InstallVectorTableEntries() {
  uint32_t prev_state = BeginCriticalSection();
  // If the vector table is already installed, there is nothing to do.
  if (cortex_m_vtor == reinterpret_cast<uint32_t>(&ram_vector_table)) {
    EndCriticalSection(prev_state);
    return;
  }
  // Copy the table to a new location, since it's not guaranteed that we can
  // write to the original one.
  std::memcpy(&ram_vector_table,
              reinterpret_cast<uint32_t*>(cortex_m_vtor),
              sizeof(ram_vector_table));

  // Override exception handling vector table entries.
  uint32_t* exception_entry_addr =
      reinterpret_cast<uint32_t*>(pw_cpu_exception_Entry);
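  // (On Cortex-M, taking the address of a function yields a Thumb address
  // with bit 0 already set, so it can be written into the vector table
  // without further adjustment.)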
  uint32_t** interrupts = reinterpret_cast<uint32_t**>(&ram_vector_table);
  interrupts[kHardFaultIsrNum] = exception_entry_addr;
  interrupts[kMemFaultIsrNum] = exception_entry_addr;
  interrupts[kBusFaultIsrNum] = exception_entry_addr;
  interrupts[kUsageFaultIsrNum] = exception_entry_addr;

  // Update Vector Table Offset Register (VTOR) to point to new vector table.
  cortex_m_vtor = reinterpret_cast<uint32_t>(&ram_vector_table);
  EndCriticalSection(prev_state);
}

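// Note: if the SHCSR enable bits below are left clear, MemManage, BusFault,
// and UsageFault conditions escalate to HardFault instead of reaching their
// dedicated handlers.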
void EnableAllFaultHandlers() {
  cortex_m_shcsr |=
      kMemFaultEnableMask | kBusFaultEnableMask | kUsageFaultEnableMask;
}

void Setup(bool use_fpu) {
  if (use_fpu) {
    EnableFpu();
  } else {
    DisableFpu();
  }
  pw_cpu_exception_SetHandler(TestingExceptionHandler);
  EnableAllFaultHandlers();
  InstallVectorTableEntries();
  exceptions_handled = 0;
  current_fault_depth = 0;
  captured_state = {};
  float_test_value = 0.0f;
  trigger_nested_fault = false;
}

TEST(FaultEntry, BasicFault) {
  Setup(/*use_fpu=*/false);
  BeginBaseFaultTest();
  ASSERT_EQ(exceptions_handled, 1u);
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r1), 0u);
  // The PC was manually staged in r2 just before the faulting instruction, so
  // it should land within a few bytes of the exception-captured PC.
  int32_t captured_pc_distance =
      captured_state.base.pc - captured_state.base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r3),
            static_cast<uint32_t>(captured_state.base.lr));
}

TEST(FaultEntry, BasicUnalignedStackFault) {
  Setup(/*use_fpu=*/false);
  BeginBaseFaultUnalignedStackTest();
  ASSERT_EQ(exceptions_handled, 1u);
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r1), 0u);
  // The PC was manually staged in r2 just before the faulting instruction, so
  // it should land within a few bytes of the exception-captured PC.
  int32_t captured_pc_distance =
      captured_state.base.pc - captured_state.base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r3),
            static_cast<uint32_t>(captured_state.base.lr));
}

TEST(FaultEntry, ExtendedFault) {
  Setup(/*use_fpu=*/false);
  BeginExtendedFaultTest();
  ASSERT_EQ(exceptions_handled, 1u);
  ASSERT_TRUE(span_matches);
  const ExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kCfsrDivbyzeroMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
}

TEST(FaultEntry, ExtendedUnalignedStackFault) {
  Setup(/*use_fpu=*/false);
  BeginExtendedFaultUnalignedStackTest();
  ASSERT_EQ(exceptions_handled, 1u);
  ASSERT_TRUE(span_matches);
  const ExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kCfsrDivbyzeroMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
}

TEST(FaultEntry, NestedFault) {
  // Due to the way nesting is handled, captured_states[0] is the nested fault
  // since that fault must be handled *FIRST*. Only after that fault is handled
  // can the original fault be correctly handled (captured into
  // captured_states[1]).

  Setup(/*use_fpu=*/false);
  trigger_nested_fault = true;
  BeginBaseFaultTest();
  ASSERT_EQ(exceptions_handled, 2u);

  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r1), 0u);
  // The PC was manually staged in r2 just before the faulting instruction, so
  // it should land within a few bytes of the exception-captured PC.
  int32_t captured_pc_distance =
      captured_states[1].base.pc - captured_states[1].base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r3),
            static_cast<uint32_t>(captured_states[1].base.lr));

  // NESTED STATE
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r0),
            kNestedMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r1), 0u);
  // The PC was manually staged in r2 just before the exception occurred, so
  // it should land within a few bytes of the exception-captured PC.
  captured_pc_distance =
      captured_states[0].base.pc - captured_states[0].base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r3),
            static_cast<uint32_t>(captured_states[0].base.lr));
}

// Disable tests that rely on hardware FPU if this module wasn't built with
// hardware FPU support.
#if PW_ARMV7M_ENABLE_FPU == 1

// Populate some of the extended set of captured registers, then trigger
// exception. This function uses floating point to validate that float context
// is pushed correctly.
void BeginExtendedFaultFloatTest() {
  float_test_value = _PW_TEST_FPU_OPERATION;
  BeginExtendedFaultTest();
}

// Populate some of the extended set of captured registers, then trigger
// exception.
// This version causes the stack to not be 8-byte aligned initially, testing
// the fault handler's correction for psp.
// This function uses floating point to validate that float context
// is pushed correctly.
void BeginExtendedFaultUnalignedStackFloatTest() {
  float_test_value = _PW_TEST_FPU_OPERATION;
  BeginExtendedFaultUnalignedStackTest();
}

TEST(FaultEntry, FloatFault) {
  Setup(/*use_fpu=*/true);
  BeginExtendedFaultFloatTest();
  ASSERT_EQ(exceptions_handled, 1u);
  const ExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kCfsrDivbyzeroMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);

  // Check that FPU state was pushed during the exception. A cleared "basic
  // frame" bit in EXC_RETURN means the extended (FPU) frame was stacked.
  EXPECT_FALSE(extended_registers.exc_return & kExcReturnBasicFrameMask);

  // Check that float_test_value is correct.
  EXPECT_EQ(float_test_value, kFloatTestPattern);
}

TEST(FaultEntry, FloatUnalignedStackFault) {
  Setup(/*use_fpu=*/true);
  BeginExtendedFaultUnalignedStackFloatTest();
  ASSERT_EQ(exceptions_handled, 1u);
  ASSERT_TRUE(span_matches);
  const ExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kCfsrDivbyzeroMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);

  // Check that FPU state was pushed during the exception.
  EXPECT_FALSE(extended_registers.exc_return & kExcReturnBasicFrameMask);

  // Check that float_test_value is correct.
  EXPECT_EQ(float_test_value, kFloatTestPattern);
}

#endif  // PW_ARMV7M_ENABLE_FPU == 1

void TestingExceptionHandler(pw_cpu_exception_State* state) {
  if (++current_fault_depth > kMaxFaultDepth) {
    volatile bool loop = true;
    while (loop) {
      // Hit unexpected nested crash, prevent further nesting.
    }
  }

  if (trigger_nested_fault) {
    // Disable nesting before triggering the nested fault to prevent infinite
    // recursive crashes.
    trigger_nested_fault = false;
    BeginNestedFaultTest();
  }
  // Logging may require the FPU (vsnprintf() may use FPU instructions), so
  // re-enable it as soon as possible.
  EnableFpu();

  // Disable traps. This must happen before any EXPECT, as memcpy() can do
  // unaligned operations.
  cortex_m_ccr &= ~kUnalignedTrapEnableMask;
  cortex_m_ccr &= ~kDivByZeroTrapEnableMask;

  // Clear the HFSR forced (nested) hard fault mask if set. This will only be
  // set by the nested fault test.
  EXPECT_EQ(state->extended.hfsr, cortex_m_hfsr);
  if (cortex_m_hfsr & kHfsrForcedMask) {
    cortex_m_hfsr = kHfsrForcedMask;
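    // (Fault status registers are write-one-to-clear, so writing the mask
    // back clears only the FORCED bit.)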
  }

  if (cortex_m_cfsr & kCfsrUnalignedMask) {
    // Copy captured state to check later.
    std::memcpy(&captured_states[exceptions_handled],
                state,
                sizeof(pw_cpu_exception_State));

    // Clear the sticky UNALIGNED bit (CFSR is write-one-to-clear) to "handle"
    // the exception; the trap itself was already disabled above.
    cortex_m_cfsr = kCfsrUnalignedMask;
    exceptions_handled++;
    return;
  } else if (cortex_m_cfsr & kCfsrDivbyzeroMask) {
    // Copy captured state to check later.
    std::memcpy(&captured_states[exceptions_handled],
                state,
                sizeof(pw_cpu_exception_State));

    // Ensure the raw span view of the state matches the captured state.
    span<const uint8_t> state_span = RawFaultingCpuState(*state);
    EXPECT_EQ(state_span.size(), sizeof(pw_cpu_exception_State));
    span_matches =
        std::memcmp(state, state_span.data(), state_span.size()) == 0;

    // Clear the sticky DIVBYZERO bit (CFSR is write-one-to-clear) to "handle"
    // the exception; the trap itself was already disabled above.
    cortex_m_cfsr = kCfsrDivbyzeroMask;
    exceptions_handled++;
    return;
  }

  EXPECT_EQ(state->extended.shcsr, cortex_m_shcsr);

  // If an unexpected exception occurred, just enter an infinite loop.
  while (true) {
  }
}

}  // namespace
}  // namespace pw::cpu_exception::cortex_m