/*
 * Copyright (c) 2019 Nuclei Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __CORE_FEATURE_BASE__
#define __CORE_FEATURE_BASE__
/*!
 * @file core_feature_base.h
 * @brief Base core feature API for Nuclei N/NX Core
 */
#include <stdint.h>
#include "riscv_encoding.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * \defgroup NMSIS_Core_Registers Register Define and Type Definitions
 * \brief Type definitions and defines for core registers.
 *
 * @{
 */
#ifndef __RISCV_XLEN
/** \brief Width of an integer register in bits (either 32 or 64) */
#ifndef __riscv_xlen
#define __RISCV_XLEN 32
#else
#define __RISCV_XLEN __riscv_xlen
#endif
#endif /* __RISCV_XLEN */

/** \brief Type of Control and Status Register (CSR), depends on the XLEN defined in RISC-V */
#if __RISCV_XLEN == 32
typedef uint32_t rv_csr_t;
#elif __RISCV_XLEN == 64
typedef uint64_t rv_csr_t;
#else
typedef uint32_t rv_csr_t;
#endif
/** @} */ /* End of Doxygen Group NMSIS_Core_Registers */
/**
 * \defgroup NMSIS_Core_Base_Registers Base Register Define and Type Definitions
 * \ingroup NMSIS_Core_Registers
 * \brief Type definitions and defines for base core registers.
 *
 * @{
 */
/**
 * \brief Union type to access MISA register.
 */
typedef union {
    struct {
        rv_csr_t a:1;           /*!< bit: 0  Atomic extension */
        rv_csr_t b:1;           /*!< bit: 1  Tentatively reserved for Bit-Manipulation extension */
        rv_csr_t c:1;           /*!< bit: 2  Compressed extension */
        rv_csr_t d:1;           /*!< bit: 3  Double-precision floating-point extension */
        rv_csr_t e:1;           /*!< bit: 4  RV32E base ISA */
        rv_csr_t f:1;           /*!< bit: 5  Single-precision floating-point extension */
        rv_csr_t g:1;           /*!< bit: 6  Additional standard extensions present */
        rv_csr_t h:1;           /*!< bit: 7  Hypervisor extension */
        rv_csr_t i:1;           /*!< bit: 8  RV32I/64I/128I base ISA */
        rv_csr_t j:1;           /*!< bit: 9  Tentatively reserved for Dynamically Translated Languages extension */
        rv_csr_t _reserved1:1;  /*!< bit: 10 Reserved */
        rv_csr_t l:1;           /*!< bit: 11 Tentatively reserved for Decimal Floating-Point extension */
        rv_csr_t m:1;           /*!< bit: 12 Integer Multiply/Divide extension */
        rv_csr_t n:1;           /*!< bit: 13 User-level interrupts supported */
        rv_csr_t _reserved2:1;  /*!< bit: 14 Reserved */
        rv_csr_t p:1;           /*!< bit: 15 Tentatively reserved for Packed-SIMD extension */
        rv_csr_t q:1;           /*!< bit: 16 Quad-precision floating-point extension */
        rv_csr_t _reserved3:1;  /*!< bit: 17 Reserved */
        rv_csr_t s:1;           /*!< bit: 18 Supervisor mode implemented */
        rv_csr_t t:1;           /*!< bit: 19 Tentatively reserved for Transactional Memory extension */
        rv_csr_t u:1;           /*!< bit: 20 User mode implemented */
        rv_csr_t v:1;           /*!< bit: 21 Tentatively reserved for Vector extension */
        rv_csr_t _reserved4:1;  /*!< bit: 22 Reserved */
        rv_csr_t x:1;           /*!< bit: 23 Non-standard extensions present */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t _reserved5:38; /*!< bit: 24..61 Reserved */
        rv_csr_t mxl:2;         /*!< bit: 62..63 Machine XLEN */
#else
        rv_csr_t _reserved5:6;  /*!< bit: 24..29 Reserved */
        rv_csr_t mxl:2;         /*!< bit: 30..31 Machine XLEN */
#endif
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MISA_Type;
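
/*
 * Usage sketch: decode MISA through the union above to discover which ISA
 * extensions the core implements at runtime. A minimal example, assuming
 * machine mode; CSR_MISA comes from riscv_encoding.h and __RV_CSR_READ is
 * defined later in this file:
 *
 *     CSR_MISA_Type misa;
 *     misa.d = __RV_CSR_READ(CSR_MISA);
 *     if (misa.b.m) {
 *         // Integer Multiply/Divide extension present
 *     }
 *     if (misa.b.a) {
 *         // Atomic extension present, the __AMO* intrinsics below are usable
 *     }
 */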

/**
 * \brief Union type to access MSTATUS configure register.
 */
typedef union {
    struct {
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t _reserved0:3;  /*!< bit: 0..2   Reserved */
        rv_csr_t mie:1;         /*!< bit: 3      Machine mode interrupt enable flag */
        rv_csr_t _reserved1:3;  /*!< bit: 4..6   Reserved */
        rv_csr_t mpie:1;        /*!< bit: 7      Mirror of MIE flag */
        rv_csr_t _reserved2:3;  /*!< bit: 8..10  Reserved */
        rv_csr_t mpp:2;         /*!< bit: 11..12 Mirror of privilege mode */
        rv_csr_t fs:2;          /*!< bit: 13..14 FS status flag */
        rv_csr_t xs:2;          /*!< bit: 15..16 XS status flag */
        rv_csr_t mprv:1;        /*!< bit: 17     Machine mode PMP */
        rv_csr_t _reserved3:14; /*!< bit: 18..31 Reserved */
        rv_csr_t uxl:2;         /*!< bit: 32..33 User mode XLEN */
        rv_csr_t _reserved6:29; /*!< bit: 34..62 Reserved */
        rv_csr_t sd:1;          /*!< bit: 63     Dirty status for XS or FS */
#else
        rv_csr_t _reserved0:1;  /*!< bit: 0      Reserved */
        rv_csr_t sie:1;         /*!< bit: 1      Supervisor interrupt enable flag */
        rv_csr_t _reserved1:1;  /*!< bit: 2      Reserved */
        rv_csr_t mie:1;         /*!< bit: 3      Machine mode interrupt enable flag */
        rv_csr_t _reserved2:1;  /*!< bit: 4      Reserved */
        rv_csr_t spie:1;        /*!< bit: 5      Supervisor privilege mode interrupt enable flag */
        rv_csr_t _reserved3:1;  /*!< bit: 6      Reserved */
        rv_csr_t mpie:1;        /*!< bit: 7      Mirror of MIE flag */
        rv_csr_t _reserved4:3;  /*!< bit: 8..10  Reserved */
        rv_csr_t mpp:2;         /*!< bit: 11..12 Mirror of privilege mode */
        rv_csr_t fs:2;          /*!< bit: 13..14 FS status flag */
        rv_csr_t xs:2;          /*!< bit: 15..16 XS status flag */
        rv_csr_t mprv:1;        /*!< bit: 17     Machine mode PMP */
        rv_csr_t sum:1;         /*!< bit: 18     Supervisor mode load and store protection */
        rv_csr_t _reserved6:12; /*!< bit: 19..30 Reserved */
        rv_csr_t sd:1;          /*!< bit: 31     Dirty status for XS or FS */
#endif
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MSTATUS_Type;
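
/*
 * Usage sketch: the union avoids manual shift/mask arithmetic when
 * inspecting MSTATUS fields, e.g. checking whether floating-point state
 * is dirty (standard FS encoding: 0 Off, 1 Initial, 2 Clean, 3 Dirty):
 *
 *     CSR_MSTATUS_Type mstatus;
 *     mstatus.d = __RV_CSR_READ(CSR_MSTATUS);
 *     if (mstatus.b.fs == 3) {
 *         // FP state dirty: save FPU registers on a context switch
 *     }
 */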

/**
 * \brief Union type to access MTVEC configure register.
 */
typedef union {
    struct {
        rv_csr_t mode:6;        /*!< bit: 0..5  Interrupt mode control */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t addr:58;       /*!< bit: 6..63 mtvec address */
#else
        rv_csr_t addr:26;       /*!< bit: 6..31 mtvec address */
#endif
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MTVEC_Type;

/**
 * \brief Union type to access MCAUSE configure register.
 */
typedef union {
    struct {
        rv_csr_t exccode:12;    /*!< bit: 0..11  Exception or interrupt code */
        rv_csr_t _reserved0:4;  /*!< bit: 12..15 Reserved */
        rv_csr_t mpil:8;        /*!< bit: 16..23 Previous interrupt level */
        rv_csr_t _reserved1:3;  /*!< bit: 24..26 Reserved */
        rv_csr_t mpie:1;        /*!< bit: 27     Interrupt enable flag before entering the interrupt */
        rv_csr_t mpp:2;         /*!< bit: 28..29 Privilege mode flag before entering the interrupt */
        rv_csr_t minhv:1;       /*!< bit: 30     Machine interrupt vector table read in progress */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t _reserved2:32; /*!< bit: 31..62 Reserved */
        rv_csr_t interrupt:1;   /*!< bit: 63     Trap type: 0 means exception, 1 means interrupt */
#else
        rv_csr_t interrupt:1;   /*!< bit: 31     Trap type: 0 means exception, 1 means interrupt */
#endif
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MCAUSE_Type;
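
/*
 * Usage sketch: a trap handler commonly decodes MCAUSE first to separate
 * interrupts from exceptions:
 *
 *     CSR_MCAUSE_Type mcause;
 *     mcause.d = __RV_CSR_READ(CSR_MCAUSE);
 *     if (mcause.b.interrupt) {
 *         // interrupt: mcause.b.exccode holds the interrupt number
 *     } else {
 *         // exception: mcause.b.exccode holds the exception code
 *     }
 */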

/**
 * \brief Union type to access MCOUNTINHIBIT configure register.
 */
typedef union {
    struct {
        rv_csr_t cy:1;          /*!< bit: 0     Set to 1 to disable the mcycle counter */
        rv_csr_t _reserved0:1;  /*!< bit: 1     Reserved */
        rv_csr_t ir:1;          /*!< bit: 2     Set to 1 to disable the minstret counter */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t _reserved1:61; /*!< bit: 3..63 Reserved */
#else
        rv_csr_t _reserved1:29; /*!< bit: 3..31 Reserved */
#endif
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MCOUNTINHIBIT_Type;

/**
 * \brief Union type to access MSUBM configure register.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:6;  /*!< bit: 0..5   Reserved */
        rv_csr_t typ:2;         /*!< bit: 6..7   Current trap type */
        rv_csr_t ptyp:2;        /*!< bit: 8..9   Previous trap type */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t _reserved1:54; /*!< bit: 10..63 Reserved */
#else
        rv_csr_t _reserved1:22; /*!< bit: 10..31 Reserved */
#endif
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MSUBM_Type;

/**
 * \brief Union type to access MMISC_CTRL configure register.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:3;  /*!< bit: 0..2   Reserved */
        rv_csr_t bpu:1;         /*!< bit: 3      Dynamic branch prediction enable flag */
        rv_csr_t _reserved1:2;  /*!< bit: 4..5   Reserved */
        rv_csr_t misalign:1;    /*!< bit: 6      Misaligned access support flag */
        rv_csr_t _reserved2:2;  /*!< bit: 7..8   Reserved */
        rv_csr_t nmi_cause:1;   /*!< bit: 9      mnvec control and NMI mcause exccode */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t _reserved3:54; /*!< bit: 10..63 Reserved */
#else
        rv_csr_t _reserved3:22; /*!< bit: 10..31 Reserved */
#endif
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MMISCCTRL_Type;

/**
 * \brief Union type to access MSAVESTATUS configure register.
 */
typedef union {
    struct {
        rv_csr_t mpie1:1;       /*!< bit: 0      Interrupt enable flag of the first level NMI/exception nesting */
        rv_csr_t mpp1:2;        /*!< bit: 1..2   Privilege mode of the first level NMI/exception nesting */
        rv_csr_t _reserved0:3;  /*!< bit: 3..5   Reserved */
        rv_csr_t ptyp1:2;       /*!< bit: 6..7   NMI/exception type before the first nesting */
        rv_csr_t mpie2:1;       /*!< bit: 8      Interrupt enable flag of the second level NMI/exception nesting */
        rv_csr_t mpp2:2;        /*!< bit: 9..10  Privilege mode of the second level NMI/exception nesting */
        rv_csr_t _reserved1:3;  /*!< bit: 11..13 Reserved */
        rv_csr_t ptyp2:2;       /*!< bit: 14..15 NMI/exception type before the second nesting */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t _reserved2:48; /*!< bit: 16..63 Reserved */
#else
        rv_csr_t _reserved2:16; /*!< bit: 16..31 Reserved */
#endif
    } b;                        /*!< Structure used for bit access */
    rv_csr_t w;                 /*!< Type used for csr data access */
} CSR_MSAVESTATUS_Type;
/** @} */ /* End of Doxygen Group NMSIS_Core_Base_Registers */

/* ########################### Core Function Access ########################### */
/**
 * \defgroup NMSIS_Core_CSR_Register_Access Core CSR Register Access
 * \ingroup NMSIS_Core
 * \brief Functions to access the Core CSR Registers
 * \details
 *
 * The following functions or macros provide access to Core CSR registers.
 * - \ref NMSIS_Core_CSR_Encoding
 * - \ref NMSIS_Core_CSR_Registers
 * @{
 */


#ifndef __ASSEMBLY__

/**
 * \brief CSR operation Macro for csrrw instruction.
 * \details
 * Read the content of csr register into __v,
 * then write the content of val into the csr register, and return __v.
 * \param csr CSR macro definition defined in
 * \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val value to store into the CSR register
 * \return the CSR register value before the write
 */
#define __RV_CSR_SWAP(csr, val)                                 \
    ({                                                          \
        register rv_csr_t __v = (rv_csr_t)(val);                \
        __ASM volatile("csrrw %0, " STRINGIFY(csr) ", %1"       \
                       : "=r"(__v)                              \
                       : "rK"(__v)                              \
                       : "memory");                             \
        __v;                                                    \
    })

/**
 * \brief CSR operation Macro for csrr instruction.
 * \details
 * Read the content of csr register to __v and return it
 * \param csr CSR macro definition defined in
 * \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \return the CSR register value
 */
#define __RV_CSR_READ(csr)                                      \
    ({                                                          \
        register rv_csr_t __v;                                  \
        __ASM volatile("csrr %0, " STRINGIFY(csr)               \
                       : "=r"(__v)                              \
                       :                                        \
                       : "memory");                             \
        __v;                                                    \
    })

/**
 * \brief CSR operation Macro for csrw instruction.
 * \details
 * Write the content of val to csr register
 * \param csr CSR macro definition defined in
 * \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val value to store into the CSR register
 */
#define __RV_CSR_WRITE(csr, val)                                \
    ({                                                          \
        register rv_csr_t __v = (rv_csr_t)(val);                \
        __ASM volatile("csrw " STRINGIFY(csr) ", %0"            \
                       :                                        \
                       : "rK"(__v)                              \
                       : "memory");                             \
    })

/**
 * \brief CSR operation Macro for csrrs instruction.
 * \details
 * Read the content of csr register to __v,
 * then set csr register to be __v | val, then return __v
 * \param csr CSR macro definition defined in
 * \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrrs instruction
 * \return the CSR register value before the write
 */
#define __RV_CSR_READ_SET(csr, val)                             \
    ({                                                          \
        register rv_csr_t __v = (rv_csr_t)(val);                \
        __ASM volatile("csrrs %0, " STRINGIFY(csr) ", %1"       \
                       : "=r"(__v)                              \
                       : "rK"(__v)                              \
                       : "memory");                             \
        __v;                                                    \
    })

/**
 * \brief CSR operation Macro for csrs instruction.
 * \details
 * Set csr register to be csr_content | val
 * \param csr CSR macro definition defined in
 * \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrs instruction
 */
#define __RV_CSR_SET(csr, val)                                  \
    ({                                                          \
        register rv_csr_t __v = (rv_csr_t)(val);                \
        __ASM volatile("csrs " STRINGIFY(csr) ", %0"            \
                       :                                        \
                       : "rK"(__v)                              \
                       : "memory");                             \
    })

/**
 * \brief CSR operation Macro for csrrc instruction.
 * \details
 * Read the content of csr register to __v,
 * then set csr register to be __v & ~val, then return __v
 * \param csr CSR macro definition defined in
 * \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrrc instruction
 * \return the CSR register value before the write
 */
#define __RV_CSR_READ_CLEAR(csr, val)                           \
    ({                                                          \
        register rv_csr_t __v = (rv_csr_t)(val);                \
        __ASM volatile("csrrc %0, " STRINGIFY(csr) ", %1"       \
                       : "=r"(__v)                              \
                       : "rK"(__v)                              \
                       : "memory");                             \
        __v;                                                    \
    })

/**
 * \brief CSR operation Macro for csrc instruction.
 * \details
 * Set csr register to be csr_content & ~val
 * \param csr CSR macro definition defined in
 * \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrc instruction
 */
#define __RV_CSR_CLEAR(csr, val)                                \
    ({                                                          \
        register rv_csr_t __v = (rv_csr_t)(val);                \
        __ASM volatile("csrc " STRINGIFY(csr) ", %0"            \
                       :                                        \
                       : "rK"(__v)                              \
                       : "memory");                             \
    })
#endif /* __ASSEMBLY__ */
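
/*
 * Usage sketch for the macros above, assuming machine mode; CSR_MSCRATCH
 * and MSTATUS_MIE come from riscv_encoding.h:
 *
 *     rv_csr_t v   = __RV_CSR_READ(CSR_MSCRATCH);        // plain read
 *     __RV_CSR_WRITE(CSR_MSCRATCH, 0x0);                 // plain write
 *     rv_csr_t old = __RV_CSR_SWAP(CSR_MSCRATCH, v);     // write, return old value
 *     __RV_CSR_SET(CSR_MSTATUS, MSTATUS_MIE);            // set the mask bits
 *     __RV_CSR_CLEAR(CSR_MSTATUS, MSTATUS_MIE);          // clear the mask bits
 */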

/**
 * \brief Enable IRQ Interrupts
 * \details Enables IRQ interrupts by setting the MIE-bit in the MSTATUS Register.
 * \remarks
 * Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
    __RV_CSR_SET(CSR_MSTATUS, MSTATUS_MIE);
}

/**
 * \brief Disable IRQ Interrupts
 * \details Disables IRQ interrupts by clearing the MIE-bit in the MSTATUS Register.
 * \remarks
 * Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_irq(void)
{
    __RV_CSR_CLEAR(CSR_MSTATUS, MSTATUS_MIE);
}
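
/*
 * Usage sketch: a nesting-safe critical section built from the CSR macros,
 * restoring the previous MIE state instead of unconditionally re-enabling
 * interrupts:
 *
 *     rv_csr_t saved = __RV_CSR_READ_CLEAR(CSR_MSTATUS, MSTATUS_MIE);
 *     // ... critical section, interrupts disabled ...
 *     __RV_CSR_SET(CSR_MSTATUS, saved & MSTATUS_MIE);    // restore old MIE
 */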

/**
 * \brief Read whole 64 bits value of mcycle counter
 * \details This function will read the whole 64 bits of MCYCLE register
 * \return The whole 64 bits value of MCYCLE
 * \remarks It will work for both RV32 and RV64 to get full 64bits value of MCYCLE
 */
__STATIC_FORCEINLINE uint64_t __get_rv_cycle(void)
{
#if __RISCV_XLEN == 32
    volatile uint32_t high0, low, high;
    uint64_t full;

    high0 = __RV_CSR_READ(CSR_MCYCLEH);
    low = __RV_CSR_READ(CSR_MCYCLE);
    high = __RV_CSR_READ(CSR_MCYCLEH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_MCYCLE);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_MCYCLE);
#else // TODO: cover the XLEN=128 case in the future
    return (uint64_t)__RV_CSR_READ(CSR_MCYCLE);
#endif
}
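
/*
 * Usage sketch: measure the cycle cost of a code region. Assumes the
 * MCYCLE counter is not inhibited (see __enable_mcycle_counter() below);
 * do_work() is a hypothetical workload:
 *
 *     uint64_t start  = __get_rv_cycle();
 *     do_work();
 *     uint64_t cycles = __get_rv_cycle() - start;
 */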

/**
 * \brief Read whole 64 bits value of machine instruction-retired counter
 * \details This function will read the whole 64 bits of MINSTRET register
 * \return The whole 64 bits value of MINSTRET
 * \remarks It will work for both RV32 and RV64 to get full 64bits value of MINSTRET
 */
__STATIC_FORCEINLINE uint64_t __get_rv_instret(void)
{
#if __RISCV_XLEN == 32
    volatile uint32_t high0, low, high;
    uint64_t full;

    high0 = __RV_CSR_READ(CSR_MINSTRETH);
    low = __RV_CSR_READ(CSR_MINSTRET);
    high = __RV_CSR_READ(CSR_MINSTRETH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_MINSTRET);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_MINSTRET);
#else // TODO: cover the XLEN=128 case in the future
    return (uint64_t)__RV_CSR_READ(CSR_MINSTRET);
#endif
}

/**
 * \brief Read whole 64 bits value of real-time clock
 * \details This function will read the whole 64 bits of TIME register
 * \return The whole 64 bits value of TIME CSR
 * \remarks It will work for both RV32 and RV64 to get full 64bits value of TIME
 * \attention Only available when user mode is implemented
 */
__STATIC_FORCEINLINE uint64_t __get_rv_time(void)
{
#if __RISCV_XLEN == 32
    volatile uint32_t high0, low, high;
    uint64_t full;

    high0 = __RV_CSR_READ(CSR_TIMEH);
    low = __RV_CSR_READ(CSR_TIME);
    high = __RV_CSR_READ(CSR_TIMEH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_TIME);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_TIME);
#else // TODO: cover the XLEN=128 case in the future
    return (uint64_t)__RV_CSR_READ(CSR_TIME);
#endif
}
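
/*
 * Usage sketch: a busy-wait delay derived from the TIME CSR. SOC_TIMER_FREQ
 * is a hypothetical SoC-specific tick frequency in Hz:
 *
 *     uint64_t ticks = (uint64_t)SOC_TIMER_FREQ / 1000;  // roughly 1 ms
 *     uint64_t start = __get_rv_time();
 *     while ((__get_rv_time() - start) < ticks) {
 *         __CPU_RELAX();                                 // see macro below
 *     }
 */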

/** @} */ /* End of Doxygen Group NMSIS_Core_CSR_Register_Access */

/* ########################### CPU Intrinsic Functions ########################### */
/**
 * \defgroup NMSIS_Core_CPU_Intrinsic Intrinsic Functions for CPU Instructions
 * \ingroup NMSIS_Core
 * \brief Functions that generate RISC-V CPU instructions.
 * \details
 *
 * The following functions generate specified RISC-V instructions that cannot be directly accessed by the compiler.
 * @{
 */

/**
 * \brief NOP Instruction
 * \details
 * No Operation does nothing.
 * This instruction can be used for code alignment purposes.
 */
__STATIC_FORCEINLINE void __NOP(void)
{
    __ASM volatile("nop");
}

/**
 * \brief Wait For Interrupt
 * \details
 * Wait For Interrupt is executed with CSR_WFE.WFE=0 and the WFI instruction.
 * It suspends execution until an interrupt, NMI or Debug event happens.
 * When the Core is woken up by an interrupt:
 * 1. if mstatus.MIE == 1 (interrupts enabled), the Core will enter the ISR code
 * 2. if mstatus.MIE == 0 (interrupts disabled), the Core will resume previous execution
 */
__STATIC_FORCEINLINE void __WFI(void)
{
    __RV_CSR_CLEAR(CSR_WFE, WFE_WFE);
    __ASM volatile("wfi");
}

/**
 * \brief Wait For Event
 * \details
 * Wait For Event is executed with CSR_WFE.WFE=1 and the WFI instruction.
 * It suspends execution until an event, NMI or Debug event happens.
 * When the Core is woken up, it will resume previous execution.
 */
__STATIC_FORCEINLINE void __WFE(void)
{
    __RV_CSR_SET(CSR_WFE, WFE_WFE);
    __ASM volatile("wfi");
    __RV_CSR_CLEAR(CSR_WFE, WFE_WFE);
}
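
/*
 * Usage sketch: a typical idle loop sleeps with __WFI() while interrupts
 * are enabled, so the core enters the ISR on wakeup and then re-checks its
 * wakeup condition; wakeup_flag is a hypothetical flag set by an ISR:
 *
 *     while (!wakeup_flag) {
 *         __WFI();
 *     }
 */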

/**
 * \brief Breakpoint Instruction
 * \details
 * Causes the processor to enter Debug state.
 * Debug tools can use this to investigate system state
 * when the instruction at a particular address is reached.
 */
__STATIC_FORCEINLINE void __EBREAK(void)
{
    __ASM volatile("ebreak");
}

/**
 * \brief Environment Call Instruction
 * \details
 * The ECALL instruction is used to make a service request to
 * the execution environment.
 */
__STATIC_FORCEINLINE void __ECALL(void)
{
    __ASM volatile("ecall");
}

/**
 * \brief WFI Sleep Mode enumeration
 */
typedef enum WFI_SleepMode {
    WFI_SHALLOW_SLEEP = 0,      /*!< Shallow sleep mode, the core_clk will be powered off */
    WFI_DEEP_SLEEP = 1          /*!< Deep sleep mode, the core_clk and core_ano_clk will be powered off */
} WFI_SleepMode_Type;

/**
 * \brief Set Sleep mode of WFI
 * \details
 * Set the SLEEPVALUE CSR register to control the
 * WFI Sleep mode.
 * \param[in] mode The sleep mode to be set
 */
__STATIC_FORCEINLINE void __set_wfi_sleepmode(WFI_SleepMode_Type mode)
{
    __RV_CSR_WRITE(CSR_SLEEPVALUE, mode);
}

/**
 * \brief Send TX Event
 * \details
 * Set the CSR TXEVT to send a TX Event.
 * The Core will output the signal tx_evt as an output event signal.
 */
__STATIC_FORCEINLINE void __TXEVT(void)
{
    __RV_CSR_SET(CSR_TXEVT, 0x1);
}

/**
 * \brief Enable MCYCLE counter
 * \details
 * Clear the CY bit of MCOUNTINHIBIT to 0 to enable the MCYCLE Counter
 */
__STATIC_FORCEINLINE void __enable_mcycle_counter(void)
{
    __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_CY);
}

/**
 * \brief Disable MCYCLE counter
 * \details
 * Set the CY bit of MCOUNTINHIBIT to 1 to disable the MCYCLE Counter
 */
__STATIC_FORCEINLINE void __disable_mcycle_counter(void)
{
    __RV_CSR_SET(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_CY);
}

/**
 * \brief Enable MINSTRET counter
 * \details
 * Clear the IR bit of MCOUNTINHIBIT to 0 to enable the MINSTRET Counter
 */
__STATIC_FORCEINLINE void __enable_minstret_counter(void)
{
    __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_IR);
}

/**
 * \brief Disable MINSTRET counter
 * \details
 * Set the IR bit of MCOUNTINHIBIT to 1 to disable the MINSTRET Counter
 */
__STATIC_FORCEINLINE void __disable_minstret_counter(void)
{
    __RV_CSR_SET(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_IR);
}

/**
 * \brief Enable MCYCLE & MINSTRET counter
 * \details
 * Clear the IR and CY bits of MCOUNTINHIBIT to 0 to enable the MINSTRET & MCYCLE Counters
 */
__STATIC_FORCEINLINE void __enable_all_counter(void)
{
    __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_IR | MCOUNTINHIBIT_CY);
}

/**
 * \brief Disable MCYCLE & MINSTRET counter
 * \details
 * Set the IR and CY bits of MCOUNTINHIBIT to 1 to disable the MINSTRET & MCYCLE Counters
 */
__STATIC_FORCEINLINE void __disable_all_counter(void)
{
    __RV_CSR_SET(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_IR | MCOUNTINHIBIT_CY);
}

/**
 * \brief Execute fence instruction, p -> pred, s -> succ
 * \details
 * The FENCE instruction ensures that all memory accesses from instructions preceding
 * the fence in program order (the `predecessor set`) appear earlier in the global memory order than
 * memory accesses from instructions appearing after the fence in program order (the `successor set`).
 * For details, please refer to The RISC-V Instruction Set Manual.
 * \param p predecessor set, such as iorw, rw, r, w
 * \param s successor set, such as iorw, rw, r, w
 **/
#define __FENCE(p, s) __ASM volatile ("fence " #p "," #s : : : "memory")

/**
 * \brief Fence.i Instruction
 * \details
 * The FENCE.I instruction is used to synchronize the instruction
 * and data streams.
 */
__STATIC_FORCEINLINE void __FENCE_I(void)
{
    __ASM volatile("fence.i");
}

/** \brief Read & Write Memory barrier */
#define __RWMB()        __FENCE(iorw,iorw)

/** \brief Read Memory barrier */
#define __RMB()         __FENCE(ir,ir)

/** \brief Write Memory barrier */
#define __WMB()         __FENCE(ow,ow)

/** \brief SMP Read & Write Memory barrier */
#define __SMP_RWMB()    __FENCE(rw,rw)

/** \brief SMP Read Memory barrier */
#define __SMP_RMB()     __FENCE(r,r)

/** \brief SMP Write Memory barrier */
#define __SMP_WMB()     __FENCE(w,w)

/** \brief CPU relax for busy loop */
#define __CPU_RELAX()   __ASM volatile ("" : : : "memory")
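
/*
 * Usage sketch: publish data to another hart by ordering the payload store
 * before the ready-flag store; shared_data and shared_ready are
 * hypothetical shared variables:
 *
 *     shared_data = 42;
 *     __SMP_WMB();               // data store ordered before flag store
 *     shared_ready = 1;
 *
 * The consumer pairs this with a read barrier:
 *
 *     while (!shared_ready) { __CPU_RELAX(); }
 *     __SMP_RMB();               // flag load ordered before data load
 *     use(shared_data);
 */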


/* ===== Load/Store Operations ===== */
/**
 * \brief Load 8bit value from address (8 bit)
 * \details Load 8 bit value.
 * \param [in] addr Address pointer to data
 * \return value of type uint8_t at (*addr)
 */
__STATIC_FORCEINLINE uint8_t __LB(volatile void *addr)
{
    uint8_t result;

    __ASM volatile ("lb %0, 0(%1)" : "=r" (result) : "r" (addr));
    return result;
}

/**
 * \brief Load 16bit value from address (16 bit)
 * \details Load 16 bit value.
 * \param [in] addr Address pointer to data
 * \return value of type uint16_t at (*addr)
 */
__STATIC_FORCEINLINE uint16_t __LH(volatile void *addr)
{
    uint16_t result;

    __ASM volatile ("lh %0, 0(%1)" : "=r" (result) : "r" (addr));
    return result;
}

/**
 * \brief Load 32bit value from address (32 bit)
 * \details Load 32 bit value.
 * \param [in] addr Address pointer to data
 * \return value of type uint32_t at (*addr)
 */
__STATIC_FORCEINLINE uint32_t __LW(volatile void *addr)
{
    uint32_t result;

    __ASM volatile ("lw %0, 0(%1)" : "=r" (result) : "r" (addr));
    return result;
}

#if __RISCV_XLEN != 32
/**
 * \brief Load 64bit value from address (64 bit)
 * \details Load 64 bit value.
 * \param [in] addr Address pointer to data
 * \return value of type uint64_t at (*addr)
 * \remarks RV64 only function
 */
__STATIC_FORCEINLINE uint64_t __LD(volatile void *addr)
{
    uint64_t result;
    __ASM volatile ("ld %0, 0(%1)" : "=r" (result) : "r" (addr));
    return result;
}
#endif

/**
 * \brief Write 8bit value to address (8 bit)
 * \details Write 8 bit value.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 */
__STATIC_FORCEINLINE void __SB(volatile void *addr, uint8_t val)
{
    __ASM volatile ("sb %0, 0(%1)" : : "r" (val), "r" (addr));
}

/**
 * \brief Write 16bit value to address (16 bit)
 * \details Write 16 bit value.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 */
__STATIC_FORCEINLINE void __SH(volatile void *addr, uint16_t val)
{
    __ASM volatile ("sh %0, 0(%1)" : : "r" (val), "r" (addr));
}

/**
 * \brief Write 32bit value to address (32 bit)
 * \details Write 32 bit value.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 */
__STATIC_FORCEINLINE void __SW(volatile void *addr, uint32_t val)
{
    __ASM volatile ("sw %0, 0(%1)" : : "r" (val), "r" (addr));
}
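
/*
 * Usage sketch: the load/store intrinsics suit fixed-address device
 * register access. UART0_TXDATA and the FIFO-full bit below are
 * hypothetical and board specific:
 *
 *     #define UART0_TXDATA ((volatile void *)0x10013000UL)
 *
 *     while (__LW(UART0_TXDATA) & 0x80000000UL) {
 *         // TX FIFO full, spin
 *     }
 *     __SW(UART0_TXDATA, (uint32_t)'A');
 */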

#if __RISCV_XLEN != 32
/**
 * \brief Write 64bit value to address (64 bit)
 * \details Write 64 bit value.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 */
__STATIC_FORCEINLINE void __SD(volatile void *addr, uint64_t val)
{
    __ASM volatile ("sd %0, 0(%1)" : : "r" (val), "r" (addr));
}
#endif

/**
 * \brief Compare and Swap 32bit value using LR and SC
 * \details Compare the old value with the value in memory; if identical,
 * store the new value in memory. Return the initial value in memory.
 * Success is indicated by comparing the return value with oldval.
 * \param [in] addr Address pointer to data, address needs to be 4-byte aligned
 * \param [in] oldval Old value of the data in address
 * \param [in] newval New value to be stored into the address
 * \return the initial value in memory
 */
__STATIC_FORCEINLINE uint32_t __CAS_W(volatile uint32_t *addr, uint32_t oldval, uint32_t newval)
{
    register uint32_t result;
    register uint32_t rc;

    __ASM volatile (                                    \
            "0:     lr.w %0, %2      \n"                \
            "       bne  %0, %z3, 1f \n"                \
            "       sc.w %1, %z4, %2 \n"                \
            "       bnez %1, 0b      \n"                \
            "1:\n"                                      \
            : "=&r"(result), "=&r"(rc), "+A"(*addr)     \
            : "r"(oldval), "r"(newval)                  \
            : "memory");
    return result;
}
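
/*
 * Usage sketch: a minimal test-and-set spinlock on top of __CAS_W. The lock
 * word must be 4-byte aligned; 0 means free, 1 means held:
 *
 *     static volatile uint32_t lock = 0;
 *
 *     while (__CAS_W(&lock, 0, 1) != 0) {
 *         __CPU_RELAX();                     // lost the race, retry
 *     }
 *     // ... critical section ...
 *     __AMOSWAP_W(&lock, 0);                 // release (see __AMOSWAP_W below)
 */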

/**
 * \brief Atomic Swap 32bit value into memory
 * \details Atomically swap new 32bit value into memory using amoswap.w.
 * \param [in] addr Address pointer to data, address needs to be 4-byte aligned
 * \param [in] newval New value to be stored into the address
 * \return the original value in memory
 */
__STATIC_FORCEINLINE uint32_t __AMOSWAP_W(volatile uint32_t *addr, uint32_t newval)
{
    register uint32_t result;

    __ASM volatile ("amoswap.w %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(newval) : "memory");
    return result;
}

/**
 * \brief Atomic Add with 32bit value
 * \details Atomically ADD 32bit value with value in memory using amoadd.w.
 * \param [in] addr Address pointer to data, address needs to be 4-byte aligned
 * \param [in] value value to be ADDed
 * \return memory value + value
 */
__STATIC_FORCEINLINE int32_t __AMOADD_W(volatile int32_t *addr, int32_t value)
{
    register int32_t result;

    __ASM volatile ("amoadd.w %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amoadd.w returns the original memory value; add again for the updated value */
    return result + value;
}
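
/*
 * Usage sketch: a shared event counter that is safe to bump from multiple
 * harts or from ISR and thread context without a lock:
 *
 *     static volatile int32_t nevents = 0;
 *
 *     int32_t count = __AMOADD_W(&nevents, 1);   // returns the updated count
 */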

/**
 * \brief Atomic And with 32bit value
 * \details Atomically AND 32bit value with value in memory using amoand.w.
 * \param [in] addr Address pointer to data, address needs to be 4-byte aligned
 * \param [in] value value to be ANDed
 * \return memory value & value
 */
__STATIC_FORCEINLINE int32_t __AMOAND_W(volatile int32_t *addr, int32_t value)
{
    register int32_t result;

    __ASM volatile ("amoand.w %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amoand.w returns the original memory value; AND again for the updated value */
    return result & value;
}

/**
 * \brief Atomic OR with 32bit value
 * \details Atomically OR 32bit value with value in memory using amoor.w.
 * \param [in] addr Address pointer to data, address needs to be 4-byte aligned
 * \param [in] value value to be ORed
 * \return memory value | value
 */
__STATIC_FORCEINLINE int32_t __AMOOR_W(volatile int32_t *addr, int32_t value)
{
    register int32_t result;

    __ASM volatile ("amoor.w %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amoor.w returns the original memory value; OR again for the updated value */
    return result | value;
}

/**
 * \brief Atomic XOR with 32bit value
 * \details Atomically XOR 32bit value with value in memory using amoxor.w.
 * \param [in] addr Address pointer to data, address needs to be 4-byte aligned
 * \param [in] value value to be XORed
 * \return memory value ^ value
 */
__STATIC_FORCEINLINE int32_t __AMOXOR_W(volatile int32_t *addr, int32_t value)
{
    register int32_t result;

    __ASM volatile ("amoxor.w %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amoxor.w returns the original memory value; XOR again for the updated value */
    return result ^ value;
}

/**
 * \brief Atomic unsigned MAX with 32bit value
 * \details Atomically unsigned max compare 32bit value with value in memory using amomaxu.w.
 * \param [in] addr Address pointer to data, address needs to be 4-byte aligned
 * \param [in] value value to be compared
 * \return the bigger value
 */
__STATIC_FORCEINLINE uint32_t __AMOMAXU_W(volatile uint32_t *addr, uint32_t value)
{
    register uint32_t result;

    __ASM volatile ("amomaxu.w %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amomaxu.w returns the original memory value; compare again for the updated value */
    return (result > value) ? result : value;
}

/**
 * \brief Atomic signed MAX with 32bit value
 * \details Atomically signed max compare 32bit value with value in memory using amomax.w.
 * \param [in] addr Address pointer to data, address needs to be 4-byte aligned
 * \param [in] value value to be compared
 * \return the bigger value
 */
__STATIC_FORCEINLINE int32_t __AMOMAX_W(volatile int32_t *addr, int32_t value)
{
    register int32_t result;

    __ASM volatile ("amomax.w %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amomax.w returns the original memory value; compare again for the updated value */
    return (result > value) ? result : value;
}

/**
 * \brief Atomic unsigned MIN with 32bit value
 * \details Atomically unsigned min compare 32bit value with value in memory using amominu.w.
 * \param [in] addr Address pointer to data, address needs to be 4-byte aligned
 * \param [in] value value to be compared
 * \return the smaller value
 */
__STATIC_FORCEINLINE uint32_t __AMOMINU_W(volatile uint32_t *addr, uint32_t value)
{
    register uint32_t result;

    __ASM volatile ("amominu.w %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amominu.w returns the original memory value; compare again for the updated value */
    return (result < value) ? result : value;
}

/**
 * \brief Atomic signed MIN with 32bit value
 * \details Atomically signed min compare 32bit value with value in memory using amomin.w.
 * \param [in] addr Address pointer to data, address needs to be 4-byte aligned
 * \param [in] value value to be compared
 * \return the smaller value
 */
__STATIC_FORCEINLINE int32_t __AMOMIN_W(volatile int32_t *addr, int32_t value)
{
    register int32_t result;

    __ASM volatile ("amomin.w %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amomin.w returns the original memory value; compare again for the updated value */
    return (result < value) ? result : value;
}

#if __RISCV_XLEN == 64
/**
 * \brief Compare and Swap 64bit value using LR and SC
 * \details Compare the old value with the value in memory; if identical,
 * store the new value in memory. Return the initial value in memory.
 * Success is indicated by comparing the return value with oldval.
 * \param [in] addr Address pointer to data, address needs to be 8-byte aligned
 * \param [in] oldval Old value of the data in address
 * \param [in] newval New value to be stored into the address
 * \return the initial value in memory
 */
__STATIC_FORCEINLINE uint64_t __CAS_D(volatile uint64_t *addr, uint64_t oldval, uint64_t newval)
{
    register uint64_t result;
    register uint64_t rc;

    __ASM volatile (                                    \
            "0:     lr.d %0, %2      \n"                \
            "       bne  %0, %z3, 1f \n"                \
            "       sc.d %1, %z4, %2 \n"                \
            "       bnez %1, 0b      \n"                \
            "1:\n"                                      \
            : "=&r"(result), "=&r"(rc), "+A"(*addr)     \
            : "r"(oldval), "r"(newval)                  \
            : "memory");
    return result;
}

/**
 * \brief Atomic Swap 64bit value into memory
 * \details Atomically swap new 64bit value into memory using amoswap.d.
 * \param [in] addr Address pointer to data, address needs to be 8-byte aligned
 * \param [in] newval New value to be stored into the address
 * \return the original value in memory
 */
__STATIC_FORCEINLINE uint64_t __AMOSWAP_D(volatile uint64_t *addr, uint64_t newval)
{
    register uint64_t result;

    __ASM volatile ("amoswap.d %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(newval) : "memory");
    return result;
}

/**
 * \brief Atomic Add with 64bit value
 * \details Atomically ADD 64bit value with value in memory using amoadd.d.
 * \param [in] addr Address pointer to data, address needs to be 8-byte aligned
 * \param [in] value value to be ADDed
 * \return memory value + value
 */
__STATIC_FORCEINLINE int64_t __AMOADD_D(volatile int64_t *addr, int64_t value)
{
    register int64_t result;

    __ASM volatile ("amoadd.d %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amoadd.d returns the original memory value; add again for the updated value */
    return result + value;
}

/**
 * \brief Atomic And with 64bit value
 * \details Atomically AND 64bit value with value in memory using amoand.d.
 * \param [in] addr Address pointer to data, address needs to be 8-byte aligned
 * \param [in] value value to be ANDed
 * \return memory value & value
 */
__STATIC_FORCEINLINE int64_t __AMOAND_D(volatile int64_t *addr, int64_t value)
{
    register int64_t result;

    __ASM volatile ("amoand.d %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amoand.d returns the original memory value; AND again for the updated value */
    return result & value;
}

/**
 * \brief Atomic OR with 64bit value
 * \details Atomically OR 64bit value with value in memory using amoor.d.
 * \param [in] addr Address pointer to data, address needs to be 8-byte aligned
 * \param [in] value value to be ORed
 * \return memory value | value
 */
__STATIC_FORCEINLINE int64_t __AMOOR_D(volatile int64_t *addr, int64_t value)
{
    register int64_t result;

    __ASM volatile ("amoor.d %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amoor.d returns the original memory value; OR again for the updated value */
    return result | value;
}

/**
 * \brief Atomic XOR with 64bit value
 * \details Atomically XOR 64bit value with value in memory using amoxor.d.
 * \param [in] addr Address pointer to data, address needs to be 8-byte aligned
 * \param [in] value value to be XORed
 * \return memory value ^ value
 */
__STATIC_FORCEINLINE int64_t __AMOXOR_D(volatile int64_t *addr, int64_t value)
{
    register int64_t result;

    __ASM volatile ("amoxor.d %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amoxor.d returns the original memory value; XOR again for the updated value */
    return result ^ value;
}

/**
 * \brief Atomic unsigned MAX with 64bit value
 * \details Atomically unsigned max compare 64bit value with value in memory using amomaxu.d.
 * \param [in] addr Address pointer to data, address needs to be 8-byte aligned
 * \param [in] value value to be compared
 * \return the bigger value
 */
__STATIC_FORCEINLINE uint64_t __AMOMAXU_D(volatile uint64_t *addr, uint64_t value)
{
    register uint64_t result;

    __ASM volatile ("amomaxu.d %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amomaxu.d returns the original memory value; compare again for the updated value */
    return (result > value) ? result : value;
}

/**
 * \brief Atomic signed MAX with 64bit value
 * \details Atomically signed max compare 64bit value with value in memory using amomax.d.
 * \param [in] addr Address pointer to data, address needs to be 8-byte aligned
 * \param [in] value value to be compared
 * \return the bigger value
 */
__STATIC_FORCEINLINE int64_t __AMOMAX_D(volatile int64_t *addr, int64_t value)
{
    register int64_t result;

    __ASM volatile ("amomax.d %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amomax.d returns the original memory value; compare again for the updated value */
    return (result > value) ? result : value;
}

/**
 * \brief Atomic unsigned MIN with 64bit value
 * \details Atomically unsigned min compare 64bit value with value in memory using amominu.d.
 * \param [in] addr Address pointer to data, address needs to be 8-byte aligned
 * \param [in] value value to be compared
 * \return the smaller value
 */
__STATIC_FORCEINLINE uint64_t __AMOMINU_D(volatile uint64_t *addr, uint64_t value)
{
    register uint64_t result;

    __ASM volatile ("amominu.d %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amominu.d returns the original memory value; compare again for the updated value */
    return (result < value) ? result : value;
}

/**
 * \brief Atomic signed MIN with 64bit value
 * \details Atomically signed min compare 64bit value with value in memory using amomin.d.
 * \param [in] addr Address pointer to data, address needs to be 8-byte aligned
 * \param [in] value value to be compared
 * \return the smaller value
 */
__STATIC_FORCEINLINE int64_t __AMOMIN_D(volatile int64_t *addr, int64_t value)
{
    register int64_t result;

    __ASM volatile ("amomin.d %0, %2, %1" : \
                    "=r"(result), "+A"(*addr) : "r"(value) : "memory");
    /* amomin.d returns the original memory value; compare again for the updated value */
    return (result < value) ? result : value;
}
#endif /* __RISCV_XLEN == 64 */

/** @} */ /* End of Doxygen Group NMSIS_Core_CPU_Intrinsic */

#ifdef __cplusplus
}
#endif
#endif /* __CORE_FEATURE_BASE__ */