/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <arch/spr_def.h>

#include "spinlock_common.h"

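/*
 * Overview of the tns-based ticket lock (see <asm/spinlock_32.h> for
 * arch_spinlock_t and TICKET_QUANTUM): the "tns" (test-and-set)
 * instruction atomically writes 1 to a word and returns its previous
 * contents, so an odd value read from next_ticket means some other cpu
 * is between its tns and its store-back.  Stored ticket values keep the
 * low bit clear, which is presumably why they advance by TICKET_QUANTUM
 * rather than by 1.
 */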
void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int iterations = 0;
	int delta;

	while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
		delay_backoff(iterations++);

	/* Increment the next ticket number, implicitly releasing tns lock. */
	lock->next_ticket = my_ticket + TICKET_QUANTUM;

	/* Wait until it's our turn. */
	while ((delta = my_ticket - lock->current_ticket) != 0)
		relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
}
EXPORT_SYMBOL(arch_spin_lock);

int arch_spin_trylock(arch_spinlock_t *lock)
{
	/*
	 * Grab a ticket; no need to retry if it's busy, we'll just
	 * treat that the same as "locked", since someone else
	 * will lock it momentarily anyway.
	 */
	int my_ticket = __insn_tns((void *)&lock->next_ticket);

	if (my_ticket == lock->current_ticket) {
		/* Not currently locked, so lock it by keeping this ticket. */
		lock->next_ticket = my_ticket + TICKET_QUANTUM;
		/* Success! */
		return 1;
	}

	if (!(my_ticket & 1)) {
		/* Release next_ticket. */
		lock->next_ticket = my_ticket;
	}

	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock);

void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u32 iterations = 0;
	int curr = READ_ONCE(lock->current_ticket);
	int next = READ_ONCE(lock->next_ticket);

	/* Return immediately if unlocked. */
	if (next == curr)
		return;

	/* Wait until the current locker has released the lock. */
	do {
		delay_backoff(iterations++);
	} while (READ_ONCE(lock->current_ticket) == curr);

	/*
	 * The TILE architecture doesn't do read speculation; therefore
	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
	 */
	barrier();
}
EXPORT_SYMBOL(arch_spin_unlock_wait);

/*
 * The low byte is always reserved to be the marker for a "tns" operation
 * since the low bit is set to "1" by a tns.  The next seven bits are
 * zeroes.  The next byte holds the "next" writer value, i.e. the ticket
 * available for the next task that wants to write.  The third byte holds
 * the current writer value, i.e. the writer who holds the current ticket.
 * If current == next == 0, there are no interested writers.
 */
#define WR_NEXT_SHIFT   _WR_NEXT_SHIFT
#define WR_CURR_SHIFT   _WR_CURR_SHIFT
#define WR_WIDTH        _WR_WIDTH
#define WR_MASK         ((1 << WR_WIDTH) - 1)

/*
 * The last eight bits hold the active reader count.  This has to be
 * zero before a writer can start to write.
 */
#define RD_COUNT_SHIFT  _RD_COUNT_SHIFT
#define RD_COUNT_WIDTH  _RD_COUNT_WIDTH
#define RD_COUNT_MASK   ((1 << RD_COUNT_WIDTH) - 1)
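
/*
 * Putting the two comments above together, the 32-bit lock word is laid
 * out roughly as follows (byte positions follow the description above;
 * the exact shift values live with the arch_rwlock_t definition):
 *
 *   +--------------+--------------+--------------+----------------+
 *   | reader count | curr writer  | next writer  |   tns marker   |
 *   |  (RD_COUNT)  |  (WR_CURR)   |  (WR_NEXT)   | low bit = tns  |
 *   +--------------+--------------+--------------+----------------+
 *    high byte                                         low byte
 */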


/*
 * We can get the read lock if everything but the reader bits (which
 * are in the high part of the word) is zero, i.e. no active or
 * waiting writers, no tns.
 *
 * We guard the tns/store-back with an interrupt critical section to
 * preserve the semantic that the same read lock can be acquired in an
 * interrupt context.
 */
int arch_read_trylock(arch_rwlock_t *rwlock)
{
	u32 val;
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
	val = __insn_tns((int *)&rwlock->lock);
	if (likely((val << _RD_COUNT_WIDTH) == 0)) {
		val += 1 << RD_COUNT_SHIFT;
		rwlock->lock = val;
		__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
		BUG_ON(val == 0);  /* we don't expect wraparound */
		return 1;
	}
	if ((val & 1) == 0)
		rwlock->lock = val;
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
	return 0;
}
EXPORT_SYMBOL(arch_read_trylock);

/*
 * Spin doing arch_read_trylock() until we acquire the lock.
 * ISSUE: This approach can permanently starve readers.  A reader who sees
 * a writer could instead take a ticket lock (just like a writer would),
 * and atomically enter read mode (with 1 reader) when it gets the ticket.
 * This way both readers and writers would always make forward progress
 * in a finite time.
 */
void arch_read_lock(arch_rwlock_t *rwlock)
{
	u32 iterations = 0;
	while (unlikely(!arch_read_trylock(rwlock)))
		delay_backoff(iterations++);
}
EXPORT_SYMBOL(arch_read_lock);

void arch_read_unlock(arch_rwlock_t *rwlock)
{
	u32 val, iterations = 0;

	mb();  /* guarantee anything modified under the lock is visible */
	for (;;) {
		__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
		val = __insn_tns((int *)&rwlock->lock);
		if (likely((val & 1) == 0)) {
			rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
			__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
			break;
		}
		__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
		delay_backoff(iterations++);
	}
}
EXPORT_SYMBOL(arch_read_unlock);

/*
 * We don't need an interrupt critical section here (unlike for
 * arch_read_lock) since we should never use a bare write lock where
 * it could be interrupted by code that could try to re-acquire it.
 */
void arch_write_lock(arch_rwlock_t *rwlock)
{
	/*
	 * The trailing underscore on this variable (and curr_ below)
	 * reminds us that the high bits are garbage; we mask them out
	 * when we compare them.
	 */
	u32 my_ticket_;
	u32 iterations = 0;
	u32 val = __insn_tns((int *)&rwlock->lock);

	if (likely(val == 0)) {
		rwlock->lock = 1 << _WR_NEXT_SHIFT;
		return;
	}

	/*
	 * Wait until there are no readers, then bump up the next
	 * field and capture the ticket value.
	 */
	for (;;) {
		if (!(val & 1)) {
			if ((val >> RD_COUNT_SHIFT) == 0)
				break;
			rwlock->lock = val;
		}
		delay_backoff(iterations++);
		val = __insn_tns((int *)&rwlock->lock);
	}

	/* Take out the next ticket and extract my ticket value. */
	rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
	my_ticket_ = val >> WR_NEXT_SHIFT;

	/* Wait until the "current" field matches our ticket. */
	for (;;) {
		u32 curr_ = val >> WR_CURR_SHIFT;
		u32 delta = ((my_ticket_ - curr_) & WR_MASK);
		if (likely(delta == 0))
			break;

		/* Delay based on how many lock-holders are still out there. */
		relax((256 / CYCLES_PER_RELAX_LOOP) * delta);

		/*
		 * Get a non-tns value to check; we don't need to tns
		 * it ourselves.  Since we're not tns'ing, we retry
		 * more rapidly to get a valid value.
		 */
		while ((val = rwlock->lock) & 1)
			relax(4);
	}
}
EXPORT_SYMBOL(arch_write_lock);

int arch_write_trylock(arch_rwlock_t *rwlock)
{
	u32 val = __insn_tns((int *)&rwlock->lock);

	/*
	 * If a tns is in progress, or there's a waiting or active locker,
	 * or active readers, we can't take the lock, so give up.
	 */
	if (unlikely(val != 0)) {
		if (!(val & 1))
			rwlock->lock = val;
		return 0;
	}

	/* Set the "next" field to mark it locked. */
	rwlock->lock = 1 << _WR_NEXT_SHIFT;
	return 1;
}
EXPORT_SYMBOL(arch_write_trylock);

void arch_write_unlock(arch_rwlock_t *rwlock)
{
	u32 val, eq, mask;

	mb();  /* guarantee anything modified under the lock is visible */
	val = __insn_tns((int *)&rwlock->lock);
	if (likely(val == (1 << _WR_NEXT_SHIFT))) {
		rwlock->lock = 0;
		return;
	}
	while (unlikely(val & 1)) {
		/* Limited backoff since we are the highest-priority task. */
		relax(4);
		val = __insn_tns((int *)&rwlock->lock);
	}
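	/*
	 * Slow path: the lock word holds more state than the simple
	 * "one writer, no waiters" pattern handled above.  The byte-wise
	 * ops below bump the "current" writer byte without carrying into
	 * the reader-count byte; if the bumped "current" byte now equals
	 * the "next" byte there are no waiting writers, so the word is
	 * reset to zero, otherwise the bumped value is stored to hand the
	 * lock to the next writer in line.
	 */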
	mask = 1 << WR_CURR_SHIFT;
	val = __insn_addb(val, mask);
	eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT));
	val = __insn_mz(eq & mask, val);
	rwlock->lock = val;
}
EXPORT_SYMBOL(arch_write_unlock);