/* SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2020 Marvell.
 */

#ifndef __SOC_OTX2_ASM_H
#define __SOC_OTX2_ASM_H

#include <linux/types.h>
#if defined(CONFIG_ARM64)
/*
 * otx2_lmt_flush is used for the LMT store operation.
 * On the OcteonTX2 platform, CPT instruction enqueue and
 * NIX packet send are only possible via LMTST
 * operations, which use the LDEOR instruction targeting
 * the coprocessor address.
 */
#define otx2_lmt_flush(ioaddr)                          \
({                                                      \
	u64 result = 0;                                 \
	__asm__ volatile(".cpu  generic+lse\n"          \
			 "ldeor xzr, %x[rf], [%[rs]]"   \
			 : [rf]"=r" (result)            \
			 : [rs]"r" (ioaddr));           \
	(result);                                       \
})
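
/*
 * Illustrative usage sketch, not part of this header's API: a driver
 * is expected to copy the command words into its LMT region first and
 * then trigger the LMTST; the LDEOR result reports whether the store
 * completed, so callers typically retry on zero. The names lmt_addr,
 * io_addr, sqe and size below are hypothetical driver variables.
 *
 *	u64 status;
 *
 *	do {
 *		memcpy(lmt_addr, sqe, size);
 *		status = otx2_lmt_flush(io_addr);
 *	} while (status == 0);
 */
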
/*
 * STEORL is a store to memory with release semantics.
 * Using it avoids a DMB barrier after each LMTST
 * operation.
 */
#define cn10k_lmt_flush(val, addr)			\
({							\
	__asm__ volatile(".cpu  generic+lse\n"		\
			 "steorl %x[rf],[%[rs]]"	\
			 : [rf] "+r"(val)		\
			 : [rs] "r"(addr));		\
})

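/*
 * Illustrative usage sketch with hypothetical names: on CN10K the
 * descriptor is written to the per-CPU LMT line, then the doorbell
 * value/target address pair is issued with STEORL. No trailing DMB
 * is needed because of the release semantics.
 *
 *	memcpy(lmt_addr, sqe, size);
 *	cn10k_lmt_flush(doorbell_val, doorbell_addr);
 */
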
/*
 * Atomically add 'incr' to the 64-bit word at 'ptr' using LDADDA
 * (LSE atomic add with acquire semantics) and return the value the
 * word held before the addition.
 */
static inline u64 otx2_atomic64_fetch_add(u64 incr, u64 *ptr)
{
	u64 result;

	asm volatile (".cpu  generic+lse\n"
		      "ldadda %x[i], %x[r], [%[b]]"
		      : [r] "=r" (result), "+m" (*ptr)
		      : [i] "r" (incr), [b] "r" (ptr)
		      : "memory");
	return result;
}

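/*
 * Illustrative sketch, hypothetical variable names: fetch-and-add
 * hands each caller a unique pre-increment value, e.g. claiming a
 * slot index shared between CPUs.
 *
 *	u64 slot = otx2_atomic64_fetch_add(1, &queue_head);
 *
 * 'slot' is the index this CPU owns; queue_head already points
 * past it for the next caller.
 */
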
#else
#define otx2_lmt_flush(ioaddr)          ({ 0; })
#define cn10k_lmt_flush(val, addr)	({ addr = val; })
#define otx2_atomic64_fetch_add(incr, ptr)	({ incr; })
#endif

#endif /* __SOC_OTX2_ASM_H */