VirtualBox

source: vbox/trunk/src/recompiler_new/qemu-lock.h@18661

Last change on this file since 18661 was 17040, checked in by vboxsync, 16 years ago

recompiler_new: svn properties.

  • Property svn:eol-style set to native
File size: 6.9 KB
 
/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */


/* Locking primitives. Most of this code should be redundant -
   system emulation doesn't need/use locking, NPTL userspace uses
   pthread mutexes, and non-NPTL userspace isn't threadsafe anyway.
   In either case a spinlock is probably the wrong kind of lock.
   Spinlocks are only good if you know another CPU has the lock and is
   likely to release it soon. In environments where you have more threads
   than physical CPUs (the extreme case being a single CPU host) a spinlock
   simply wastes CPU until the OS decides to preempt it. */
#if defined(USE_NPTL)

#include <pthread.h>
#define spin_lock pthread_mutex_lock
#define spin_unlock pthread_mutex_unlock
#define spinlock_t pthread_mutex_t
#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER

#else

#if defined(__hppa__)

typedef int spinlock_t[4];

#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }

static inline void resetlock (spinlock_t *p)
{
    (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
}

#else

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#ifndef VBOX
static inline void resetlock (spinlock_t *p)
#else
DECLINLINE(void) resetlock (spinlock_t *p)
#endif
{
    *p = SPIN_LOCK_UNLOCKED;
}

#endif

#ifdef VBOX
DECLINLINE(int) testandset (int *p)
{
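    /* Descriptive note: ASMAtomicCmpXchgU32 stores 1 into *p only if *p
       currently equals 0, and returns true (1) exactly when that exchange
       happened, i.e. when the lock was free and has just been taken. */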
    return ASMAtomicCmpXchgU32((uint32_t volatile *)p, 1, 0);
}
#elif defined(__powerpc__)
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "0:    lwarx %0,0,%1\n"
        "      xor. %0,%3,%0\n"
        "      bne 1f\n"
        "      stwcx. %2,0,%1\n"
        "      bne- 0b\n"
        "1:    "
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__x86_64__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#elif defined(__alpha__)
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
static inline int testandset (int *p)
{
    char ret;
    /* The memory operand must be the lock word itself, not the local
       pointer variable, so dereference p here. */
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (*p)
                         : "cc","memory");
    return ret;
}
#elif defined(__hppa__)

/* Because malloc only guarantees 8-byte alignment for malloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration. So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore. */
#define __PA_LDCW_ALIGNMENT 16
static inline void *ldcw_align (void *p) {
    unsigned long a = (unsigned long)p;
    a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
    return (void *)a;
}
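/* A hypothetical worked example of the round-up above: for a lock at
   address 0x1008, (0x1008 + 15) & ~15UL == 0x1010, which still falls
   inside the 16-byte spinlock_t array that starts at 0x1008. */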

static inline int testandset (spinlock_t *p)
{
    unsigned int ret;
    p = ldcw_align(p);
    __asm__ __volatile__("ldcw 0(%1),%0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory" );
    return !ret;
}

#elif defined(__ia64)

#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "       .set push               \n"
        "       .set noat               \n"
        "       .set mips2              \n"
        "1:     li      $1, 1           \n"
        "       ll      %0, %1          \n"
        "       sc      $1, %1          \n"
        "       beqz    $1, 1b          \n"
        "       .set pop                "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif

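/* Descriptive note: in user-mode emulation the spin functions really spin;
   spin_lock loops until testandset reports the lock was free (a zero
   return). */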
#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    resetlock(lock);
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
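/* Descriptive note: per the comment at the top of the file, system
   emulation does not use locking, so these degenerate to no-ops. */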
#ifndef VBOX
static inline void spin_lock(spinlock_t *lock)
#else
DECLINLINE(void) spin_lock(spinlock_t *lock)
#endif
{
}

#ifndef VBOX
static inline void spin_unlock(spinlock_t *lock)
#else
DECLINLINE(void) spin_unlock(spinlock_t *lock)
#endif
{
}

#ifndef VBOX
static inline int spin_trylock(spinlock_t *lock)
#else
DECLINLINE(int) spin_trylock(spinlock_t *lock)
#endif
{
    return 1;
}
#endif

#endif
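
For reference, a minimal caller-side sketch of how this API is used. The lock and counter names and the include path are illustrative assumptions, not part of the file; with USE_NPTL the same calls map onto pthread mutexes, and in a system emulation build (no CONFIG_USER_ONLY) they compile to no-ops:

#include "qemu-lock.h"               /* assumed include path */

static spinlock_t counter_lock = SPIN_LOCK_UNLOCKED;
static int shared_counter;

static void bump_counter(void)
{
    spin_lock(&counter_lock);        /* spins in user-mode emulation */
    shared_counter++;                /* critical section */
    spin_unlock(&counter_lock);
}

Note that spin_trylock (returning non-zero on success) is only defined in the non-NPTL branches above, so portable callers confine themselves to spin_lock and spin_unlock.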