#ifndef _ASM_PARISC_FUTEX_H
#define _ASM_PARISC_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/errno.h>

/* The following has to match the LWS code in syscall.S. We have
   sixteen four-word locks. */
14 static inline void
_futex_spin_lock_irqsave(u32 __user * uaddr,unsigned long int * flags)15 _futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
16 {
17 extern u32 lws_lock_start[];
18 long index = ((long)uaddr & 0xf0) >> 2;
19 arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
20 local_irq_save(*flags);
21 arch_spin_lock(s);
22 }
24 static inline void
_futex_spin_unlock_irqrestore(u32 __user * uaddr,unsigned long int * flags)25 _futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
26 {
27 extern u32 lws_lock_start[];
28 long index = ((long)uaddr & 0xf0) >> 2;
29 arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
30 arch_spin_unlock(s);
31 local_irq_restore(*flags);
32 }
34 static inline int
arch_futex_atomic_op_inuser(int op,int oparg,int * oval,u32 __user * uaddr)35 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
36 {
37 unsigned long int flags;
38 u32 val;
39 int oldval = 0, ret;
40
41 pagefault_disable();
42
43 _futex_spin_lock_irqsave(uaddr, &flags);
44
45 switch (op) {
46 case FUTEX_OP_SET:
47 /* *(int *)UADDR2 = OPARG; */
48 ret = get_user(oldval, uaddr);
49 if (!ret)
50 ret = put_user(oparg, uaddr);
51 break;
52 case FUTEX_OP_ADD:
53 /* *(int *)UADDR2 += OPARG; */
54 ret = get_user(oldval, uaddr);
55 if (!ret) {
56 val = oldval + oparg;
57 ret = put_user(val, uaddr);
58 }
59 break;
60 case FUTEX_OP_OR:
61 /* *(int *)UADDR2 |= OPARG; */
62 ret = get_user(oldval, uaddr);
63 if (!ret) {
64 val = oldval | oparg;
65 ret = put_user(val, uaddr);
66 }
67 break;
68 case FUTEX_OP_ANDN:
69 /* *(int *)UADDR2 &= ~OPARG; */
70 ret = get_user(oldval, uaddr);
71 if (!ret) {
72 val = oldval & ~oparg;
73 ret = put_user(val, uaddr);
74 }
75 break;
76 case FUTEX_OP_XOR:
77 /* *(int *)UADDR2 ^= OPARG; */
78 ret = get_user(oldval, uaddr);
79 if (!ret) {
80 val = oldval ^ oparg;
81 ret = put_user(val, uaddr);
82 }
83 break;
84 default:
85 ret = -ENOSYS;
86 }
87
88 _futex_spin_unlock_irqrestore(uaddr, &flags);
89
90 pagefault_enable();
91
92 if (!ret)
93 *oval = oldval;
94
95 return ret;
96 }

/* Non-atomic version */
99 static inline int
futex_atomic_cmpxchg_inatomic(u32 * uval,u32 __user * uaddr,u32 oldval,u32 newval)100 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
101 u32 oldval, u32 newval)
102 {
103 int ret;
104 u32 val;
105 unsigned long flags;
106
107 /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
108 * our gateway page, and causes no end of trouble...
109 */
110 if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
111 return -EFAULT;
112
113 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
114 return -EFAULT;
115
116 /* HPPA has no cmpxchg in hardware and therefore the
117 * best we can do here is use an array of locks. The
118 * lock selected is based on a hash of the userspace
119 * address. This should scale to a couple of CPUs.
120 */
121
122 _futex_spin_lock_irqsave(uaddr, &flags);
123
124 ret = get_user(val, uaddr);
125
126 if (!ret && val == oldval)
127 ret = put_user(newval, uaddr);
128
129 *uval = val;
130
131 _futex_spin_unlock_irqrestore(uaddr, &flags);
132
133 return ret;
134 }

#endif /*__KERNEL__*/
#endif /*_ASM_PARISC_FUTEX_H*/