#ifndef __ASM_X86_REFCOUNT_H
#define __ASM_X86_REFCOUNT_H
/*
 * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
 * PaX/grsecurity.
 */
#include <linux/refcount.h>

/*
 * This is the first portion of the refcount error handling, which lives in
 * .text..refcount, and is jumped to from the CPU flag check (in the
 * following macros). This saves the refcount value location into CX for
 * the exception handler to use (in mm/extable.c), and then triggers the
 * central refcount exception. The fixup address for the exception points
 * back to the regular execution flow in .text.
 */
#define _REFCOUNT_EXCEPTION				\
	".pushsection .text..refcount\n"		\
	"111:\tlea %[counter], %%" _ASM_CX "\n"		\
	"112:\t" ASM_UD0 "\n"				\
	ASM_UNREACHABLE					\
	".popsection\n"					\
	"113:\n"					\
	_ASM_EXTABLE_REFCOUNT(112b, 113b)

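/*
 * Illustrative sketch (not part of the original header): with the checks
 * below appended to an atomic op, a refcount_inc() call site expands to
 * roughly the following, assuming the counter address is in %rdi:
 *
 *	lock incl (%rdi)		# the atomic op, in .text
 *	js 111f				# REFCOUNT_CHECK_LT_ZERO
 *	113:				# fixup target, execution resumes here
 *
 *	# out of line, in .text..refcount:
 *	111:	lea (%rdi), %rcx	# counter location for the handler
 *	112:	ud0			# trap into the refcount exception
 *	# the exception table maps the fault at 112 back to 113
 */
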
/* Trigger refcount exception if refcount result is negative. */
#define REFCOUNT_CHECK_LT_ZERO				\
	"js 111f\n\t"					\
	_REFCOUNT_EXCEPTION

/* Trigger refcount exception if refcount result is zero or negative. */
#define REFCOUNT_CHECK_LE_ZERO				\
	"jz 111f\n\t"					\
	REFCOUNT_CHECK_LT_ZERO

/* Trigger refcount exception unconditionally. */
#define REFCOUNT_ERROR					\
	"jmp 111f\n\t"					\
	_REFCOUNT_EXCEPTION

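/*
 * Note on composition: "js" fires when the preceding locked instruction
 * set SF (negative result) and "jz" when it set ZF (zero result), so
 * REFCOUNT_CHECK_LE_ZERO expands to "jz 111f; js 111f; <exception>",
 * catching both the zero and the negative case.
 */
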
static __always_inline void refcount_add(unsigned int i, refcount_t *r)
{
	asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
		REFCOUNT_CHECK_LT_ZERO
		: [counter] "+m" (r->refs.counter)
		: "ir" (i)
		: "cc", "cx");
}

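/*
 * Example (illustrative; 'obj' and its refcount field are hypothetical):
 * callers use this to take several references at once:
 *
 *	refcount_add(nr_new_users, &obj->ref);
 *
 * If the addition makes the counter negative, the check above traps and
 * the exception handler in mm/extable.c saturates the counter instead of
 * letting it wrap around.
 */
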
static __always_inline void refcount_inc(refcount_t *r)
{
	asm volatile(LOCK_PREFIX "incl %0\n\t"
		REFCOUNT_CHECK_LT_ZERO
		: [counter] "+m" (r->refs.counter)
		: : "cc", "cx");
}

static __always_inline void refcount_dec(refcount_t *r)
{
	asm volatile(LOCK_PREFIX "decl %0\n\t"
		REFCOUNT_CHECK_LE_ZERO
		: [counter] "+m" (r->refs.counter)
		: : "cc", "cx");
}

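/*
 * Note: refcount_dec() uses the LE_ZERO check because a plain decrement
 * must never reach zero; a caller that can drop the last reference is
 * expected to use refcount_dec_and_test() below and free the object when
 * it returns true.
 */
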
static __always_inline __must_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
				  r->refs.counter, "er", i, "%0", e, "cx");
}

static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
				 r->refs.counter, "%0", e, "cx");
}

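/*
 * The GEN_*_SUFFIXED_RMWcc() macros (from asm/rmwcc.h) emit the locked
 * instruction, append the refcount check as a suffix, and make the
 * function return true when the "e" (zero) condition holds, i.e. when
 * the counter reached zero. The canonical release pattern (with a
 * hypothetical 'obj') is:
 *
 *	if (refcount_dec_and_test(&obj->ref))
 *		kfree(obj);
 */
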
static __always_inline __must_check
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	int c, result;

	c = atomic_read(&(r->refs));
	do {
		if (unlikely(c == 0))
			return false;

		result = c + i;

		/* Did we try to increment from/to an undesirable state? */
		if (unlikely(c < 0 || c == INT_MAX || result < c)) {
			asm volatile(REFCOUNT_ERROR
				     : : [counter] "m" (r->refs.counter)
				     : "cc", "cx");
			break;
		}

	} while (!atomic_try_cmpxchg(&(r->refs), &c, result));

	return c != 0;
}

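/*
 * Note: atomic_try_cmpxchg() writes the current counter value back into
 * 'c' when the compare fails, so each retry of the loop above re-runs
 * the zero and overflow checks against a fresh snapshot rather than
 * re-reading the counter by hand.
 */
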
static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return refcount_add_not_zero(1, r);
}

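/*
 * Example (illustrative; 'obj', 'tree' and the lookup are hypothetical):
 * the *_not_zero() variants are the usual way to take a reference on an
 * object found by a lockless lookup, where the object may already be on
 * its way to being freed:
 *
 *	rcu_read_lock();
 *	obj = radix_tree_lookup(&tree, id);
 *	if (obj && !refcount_inc_not_zero(&obj->ref))
 *		obj = NULL;		// lost the race with the last put
 *	rcu_read_unlock();
 */
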
#endif