/*
 *  arch/s390/lib/spinlock.c
 *    Out of line spinlock code.
 *
 *    Copyright (C) IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

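/* Number of lock acquisition retries before yielding the cpu. */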
int spin_retry = 1000;

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

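/*
 * The lock word holds the one's complement of the owning cpu number,
 * so a value of zero always means "unlocked", even for cpu 0.  A cpu
 * spins as long as the lock is free or the owner is known to be
 * running, and otherwise asks the hypervisor to run the owner.
 */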
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
			}
			/* Keep spinning on LPAR instead of yielding. */
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

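/*
 * Variant for callers that entered with interrupts disabled:
 * interrupts are re-enabled from the saved flags while busy waiting
 * and disabled again around each acquisition attempt.
 */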
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	local_irq_restore(flags);
	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
				local_irq_restore(flags);
			}
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

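/*
 * Bounded trylock: give up after spin_retry attempts instead of
 * spinning indefinitely.
 */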
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();
	int count;

	for (count = spin_retry; count > 0; count--) {
		if (arch_spin_is_locked(lp))
			continue;
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

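/*
 * If the lock is owned, give the owner a chance to run: yield
 * unconditionally under z/VM and KVM, otherwise only if the owning
 * virtual cpu is not currently scheduled.
 */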
void arch_spin_relax(arch_spinlock_t *lock)
{
	unsigned int cpu = lock->owner_cpu;
	if (cpu != 0) {
		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
		    !smp_vcpu_scheduled(~cpu))
			smp_yield_cpu(~cpu);
	}
}
EXPORT_SYMBOL(arch_spin_relax);

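/*
 * For the rwlocks, bit 31 (0x80000000) of rw->lock is the write lock
 * bit and the lower 31 bits count the readers.  Masking the old value
 * with 0x7fffffffU makes the compare-and-swap fail whenever the write
 * lock bit is set.
 */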
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

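/*
 * As _raw_read_lock_wait(), but for callers that entered with
 * interrupts disabled: the saved flags are restored while waiting
 * and interrupts are turned off again before the lock is taken.
 */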
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);

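/*
 * Bounded read trylock, limited to spin_retry attempts.
 */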
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

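/*
 * A writer can take the lock only while the whole lock word is zero,
 * i.e. there are no readers and no other writer.
 */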
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

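/*
 * As _raw_write_lock_wait(), but interrupts stay enabled from the
 * saved flags while waiting and are disabled again just before the
 * lock is taken.
 */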
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);

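/*
 * Bounded write trylock, limited to spin_retry attempts.
 */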
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);