/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

/* Locking primitives.  Most of this code should be redundant -
   system emulation doesn't need/use locking, NPTL userspace uses
   pthread mutexes, and non-NPTL userspace isn't threadsafe anyway.
   In any case a spinlock is probably the wrong kind of lock.
   Spinlocks are only good if you know another CPU has the lock and is
   likely to release it soon.  In environments where you have more threads
   than physical CPUs (the extreme case being a single CPU host) a spinlock
   simply wastes CPU until the OS decides to preempt it.  */
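/* Intended usage, as a minimal sketch ("lock" below is purely
   illustrative); the same pattern works for both the NPTL and the
   non-NPTL definitions that follow:

       static spinlock_t lock = SPIN_LOCK_UNLOCKED;

       spin_lock(&lock);
       ... critical section ...
       spin_unlock(&lock);
*/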
#if defined(CONFIG_USE_NPTL)

#include <pthread.h>
#define spin_lock pthread_mutex_lock
#define spin_unlock pthread_mutex_unlock
#define spinlock_t pthread_mutex_t
#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER

#else

#if defined(__hppa__)

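/* The PA-RISC "ldcw" semaphore instruction requires a 16-byte aligned
   operand, so the lock is four words and the aligned element is picked
   at run time (see ldcw_align() further down).  */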
typedef int spinlock_t[4];

#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }

static inline void resetlock (spinlock_t *p)
{
    (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
}

#else

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

static inline void resetlock (spinlock_t *p)
{
    *p = SPIN_LOCK_UNLOCKED;
}

#endif

#if defined(_ARCH_PPC)
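/* Atomically set *p to 1 and return the old value, using a PowerPC
   lwarx/stwcx. (load-reserve/store-conditional) retry loop.  */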
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "      lwarx %0,0,%1\n"
        "      xor. %0,%3,%0\n"
        "      bne $+12\n"
        "      stwcx. %2,0,%1\n"
        "      bne- $-16\n"
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
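/* Compare-and-swap: "lock; cmpxchgl" stores 1 into *p only if *p was 0
   (the value in readval/eax), and returns the value that was found
   there, so 0 means the lock was acquired.  */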
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__x86_64__)
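/* Identical to the i386 version: the lock word stays 32 bits wide on
   x86_64, so the same cmpxchgl sequence applies.  */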
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
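/* s390 "cs" (compare and swap): retry until 1 is successfully exchanged
   into *p, returning the previous contents.  */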
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#elif defined(__alpha__)
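/* Alpha ldl_l/stl_c (load-locked/store-conditional); the branch placed
   out of line in .subsection 2 retries when the conditional store
   fails.  */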
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
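/* SPARC "ldstub" atomically loads a byte and sets it to 0xff; a zero
   result therefore means the lock was free.  */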
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
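/* ARM "swp" atomically exchanges a register with memory.  (swp was
   deprecated from ARMv6 onwards in favour of ldrex/strex.)  */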
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
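/* m68k "tas" sets the high bit of a byte and sets the condition codes
   from its old value; "sne" then yields a zero/non-zero result.  */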
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (*p)
                         : "cc","memory");
    return ret;
}
#elif defined(__hppa__)

/* Because malloc only guarantees 8-byte alignment for malloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore.  */
#define __PA_LDCW_ALIGNMENT 16
static inline void *ldcw_align (void *p) {
    unsigned long a = (unsigned long)p;
    a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
    return (void *)a;
}

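/* "ldcw" reads the aligned word and clears it to 0; it returns non-zero
   (the unlocked value 1) only if the lock was free, so the result is
   inverted to match the 0-on-success convention of the other
   testandset implementations.  */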
static inline int testandset (spinlock_t *p)
{
    unsigned int ret;
    p = ldcw_align(p);
    __asm__ __volatile__("ldcw 0(%1),%0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory" );
    return !ret;
}

#elif defined(__ia64)

#include <ia64intrin.h>

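/* The GCC builtin atomically writes 1 into *p and returns the previous
   contents.  */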
static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
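/* MIPS ll/sc (load-linked/store-conditional) loop; $1, the assembler
   temporary freed up by ".set noat", holds the value 1 being stored.  */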
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "   .set push              \n"
        "   .set noat              \n"
        "   .set mips2             \n"
        "1: li   $1, 1             \n"
        "   ll   %0, %1            \n"
        "   sc   $1, %1            \n"
        "   beqz $1, 1b            \n"
        "   .set pop               "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif

#if defined(CONFIG_USER_ONLY)
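/* In user-mode emulation several CPU threads can race, so spin_lock
   busy-waits on testandset until the lock is acquired.  */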
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    resetlock(lock);
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
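/* System emulation doesn't use locking (see the comment at the top of
   this file), so here the locks degenerate to no-ops and spin_trylock
   always succeeds.  */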
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

#endif