VirtualBox

source: vbox/trunk/src/recompiler/tcg/tcg-dyngen.c@21918

Last change on this file since 21918 was 18083, checked in by vboxsync, 16 years ago

eol

  • Property svn:eol-style set to native
File size: 13.0 KB
 
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#ifndef VBOX
#include <assert.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#else
#include <stdio.h>
#include "osdep.h"
#endif

#include "config.h"
#include "osdep.h"

#include "tcg.h"

int __op_param1, __op_param2, __op_param3;
#if defined(__sparc__) || defined(__arm__)
  void __op_gen_label1(){}
  void __op_gen_label2(){}
  void __op_gen_label3(){}
#else
  int __op_gen_label1, __op_gen_label2, __op_gen_label3;
#endif
int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;
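
/* Annotation (not in the original file): these globals are never used as
 * data.  dyngen compiles each micro-op in op.c to object code and scans it
 * for relocations against __op_paramN, __op_jmpN and __op_gen_labelN; the
 * relocation offsets tell it where to patch real operand values, jump
 * targets and label addresses into the copied code at translation time.
 * A micro-op references them roughly like this (illustrative sketch in the
 * classic dyngen style, PARAM1 being the conventional alias):
 *
 *     #define PARAM1 ((long)(&__op_param1))
 *     void op_movl_T0_im(void) { T0 = PARAM1; }
 *
 * On sparc/arm the label markers are empty functions, presumably because
 * those hosts reference labels through branch-type relocations that need a
 * real code address.
 */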

#if 0
#if defined(__s390__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
}
#elif defined(__ia64__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    while (start < stop) {
        asm volatile ("fc %0" :: "r"(start));
        start += 32;
    }
    asm volatile (";;sync.i;;srlz.i;;");
}
#elif defined(__powerpc__)

#define MIN_CACHE_LINE_SIZE 8 /* conservative value */

static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    unsigned long p;

    start &= ~(MIN_CACHE_LINE_SIZE - 1);
    stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1);

    for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__alpha__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    asm ("imb");
}
#elif defined(__sparc__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    unsigned long p;

    p = start & ~(8UL - 1UL);
    stop = (stop + (8UL - 1UL)) & ~(8UL - 1UL);

    for (; p < stop; p += 8)
        __asm__ __volatile__("flush\t%0" : : "r" (p));
}
#elif defined(__arm__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    register unsigned long _beg __asm ("a1") = start;
    register unsigned long _end __asm ("a2") = stop;
    register unsigned long _flg __asm ("a3") = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
}
#elif defined(__mc68000)

# include <asm/cachectl.h>
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    cacheflush(start,FLUSH_SCOPE_LINE,FLUSH_CACHE_BOTH,stop-start+16);
}
#elif defined(__mips__)

#include <sys/cachectl.h>
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    _flush_cache ((void *)start, stop - start, BCACHE);
}
#else
#error unsupported CPU
#endif
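
/* Annotation (not in the original file): everything from flush_icache_range()
 * above down to the second #endif far below is compiled out by the "#if 0".
 * These are the old dyngen-era icache flushers; the TCG build obtains
 * flush_icache_range() from the per-host support code instead, so this block
 * appears to be retained for reference only.
 */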

#ifdef __alpha__

register int gp asm("$29");

static inline void immediate_ldah(void *p, int val) {
    uint32_t *dest = p;
    long high = ((val >> 16) + ((val >> 15) & 1)) & 0xffff;

    *dest &= ~0xffff;
    *dest |= high;
    *dest |= 31 << 16;
}
static inline void immediate_lda(void *dest, int val) {
    *(uint16_t *) dest = val;
}
void fix_bsr(void *p, int offset) {
    uint32_t *dest = p;
    *dest &= ~((1 << 21) - 1);
    *dest |= (offset >> 2) & ((1 << 21) - 1);
}

#endif /* __alpha__ */
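
/* Annotation (not in the original file): Alpha materializes a 32-bit
 * immediate as an ldah/lda pair: ldah adds the high part in units of 65536
 * and lda adds a sign-extended 16-bit low part.  Because the low part is
 * sign-extended, immediate_ldah() rounds the high half up by bit 15 of val.
 * Worked example: val = 0x18000 has low part 0x8000 = -32768, so the high
 * part must be (val >> 16) + 1 = 2, and 2*65536 - 32768 = 0x18000 as
 * required.  The "31 << 16" sets the base-register field to $31 (the
 * always-zero register), and fix_bsr() rewrites the 21-bit word
 * displacement of a br/bsr instruction.
 */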

#ifdef __ia64

/* Patch instruction with "val" where "mask" has 1 bits. */
static inline void ia64_patch (uint64_t insn_addr, uint64_t mask, uint64_t val)
{
    uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) (insn_addr & -16);
#   define insn_mask ((1UL << 41) - 1)
    unsigned long shift;

    b0 = b[0]; b1 = b[1];
    shift = 5 + 41 * (insn_addr % 16); /* 5 template, 3 x 41-bit insns */
    if (shift >= 64) {
        m1 = mask << (shift - 64);
        v1 = val << (shift - 64);
    } else {
        m0 = mask << shift; m1 = mask >> (64 - shift);
        v0 = val << shift; v1 = val >> (64 - shift);
        b[0] = (b0 & ~m0) | (v0 & m0);
    }
    b[1] = (b1 & ~m1) | (v1 & m1);
}
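
/* Annotation (not in the original file): IA-64 packs instructions into
 * 16-byte bundles: a 5-bit template followed by three 41-bit instruction
 * slots.  insn_addr carries the slot number (0-2) in its low bits, hence:
 *
 *     slot 0: shift =  5, field in bits  5..45,  entirely within b[0]
 *     slot 1: shift = 46, field in bits 46..86,  split across b[0]/b[1]
 *     slot 2: shift = 87, field in bits 87..127, entirely within b[1]
 *
 * which is why mask and val are split into the (m0,v0)/(m1,v1) halves.  For
 * slot 0 the b[1] store leaves b[1] unchanged, because a 41-bit mask
 * shifted right by 59 yields m1 == 0.
 */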

static inline void ia64_patch_imm60 (uint64_t insn_addr, uint64_t val)
{
    ia64_patch(insn_addr,
               0x011ffffe000UL,
               (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
                | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */));
    ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
}

static inline void ia64_imm64 (void *insn, uint64_t val)
{
    /* Ignore the slot number of the relocation; GCC and Intel
       toolchains differed for some time on whether IMM64 relocs are
       against slot 1 (Intel) or slot 2 (GCC). */
    uint64_t insn_addr = (uint64_t) insn & ~3UL;

    ia64_patch(insn_addr + 2,
               0x01fffefe000UL,
               (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
                | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
                | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */
                | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */)
               );
    ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
}

static inline void ia64_imm60b (void *insn, uint64_t val)
{
    /* Ignore the slot number of the relocation; GCC and Intel
       toolchains differed for some time on whether IMM64 relocs are
       against slot 1 (Intel) or slot 2 (GCC). */
    uint64_t insn_addr = (uint64_t) insn & ~3UL;

    if (val + ((uint64_t) 1 << 59) >= (1UL << 60))
        fprintf(stderr, "%s: value %ld out of IMM60 range\n",
                __FUNCTION__, (int64_t) val);
    ia64_patch_imm60(insn_addr + 2, val);
}

static inline void ia64_imm22 (void *insn, uint64_t val)
{
    if (val + (1 << 21) >= (1 << 22))
        fprintf(stderr, "%s: value %li out of IMM22 range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x01fffcfe000UL,
               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */
                | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */));
}

/* Like ia64_imm22(), but also clear bits 20-21.  For addl, this has
   the effect of turning "addl rX=imm22,rY" into "addl
   rX=imm22,r0". */
static inline void ia64_imm22_r0 (void *insn, uint64_t val)
{
    if (val + (1 << 21) >= (1 << 22))
        fprintf(stderr, "%s: value %li out of IMM22 range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x01fffcfe000UL | (0x3UL << 20),
               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */
                | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */));
}

static inline void ia64_imm21b (void *insn, uint64_t val)
{
    if (val + (1 << 20) >= (1 << 21))
        fprintf(stderr, "%s: value %li out of IMM21b range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x11ffffe000UL,
               (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
                | ((val & 0x0fffffUL) << 13) /* bit 0 -> 13 */));
}

static inline void ia64_nop_b (void *insn)
{
    ia64_patch((uint64_t) insn, (1UL << 41) - 1, 2UL << 37);
}

static inline void ia64_ldxmov(void *insn, uint64_t val)
{
    if (val + (1 << 21) < (1 << 22))
        ia64_patch((uint64_t) insn, 0x1fff80fe000UL, 8UL << 37);
}

static inline int ia64_patch_ltoff(void *insn, uint64_t val,
                                   int relaxable)
{
    if (relaxable && (val + (1 << 21) < (1 << 22))) {
        ia64_imm22_r0(insn, val);
        return 0;
    }
    return 1;
}
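
/* Annotation (not in the original file), my reading of the relaxation
 * logic: an @ltoff access normally compiles to "addl rX = @ltoff(sym), gp"
 * followed by "ld8 rX = [rX]", i.e. an indirect load through the GOT.  If
 * the value itself fits in a signed 22-bit immediate and the site is marked
 * relaxable, ia64_patch_ltoff() rewrites the addl into "addl rX = val, r0"
 * via ia64_imm22_r0(), materializing the value directly; ia64_ldxmov() is
 * the matching relaxation that neutralizes the now-redundant ld8.
 * Otherwise IA64_LTOFF (below) queues a fixup so that ia64_apply_fixes()
 * can point the addl at a real GOT slot later.
 */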

struct ia64_fixup {
    struct ia64_fixup *next;
    void *addr; /* address that needs to be patched */
    long value;
};

#define IA64_PLT(insn, plt_index)                       \
do {                                                    \
    struct ia64_fixup *fixup = alloca(sizeof(*fixup));  \
    fixup->next = plt_fixes;                            \
    plt_fixes = fixup;                                  \
    fixup->addr = (insn);                               \
    fixup->value = (plt_index);                         \
    plt_offset[(plt_index)] = 1;                        \
} while (0)

#define IA64_LTOFF(insn, val, relaxable)                        \
do {                                                            \
    if (ia64_patch_ltoff(insn, val, relaxable)) {               \
        struct ia64_fixup *fixup = alloca(sizeof(*fixup));      \
        fixup->next = ltoff_fixes;                              \
        ltoff_fixes = fixup;                                    \
        fixup->addr = (insn);                                   \
        fixup->value = (val);                                   \
    }                                                           \
} while (0)
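
/* Annotation (not in the original file): both macros chain their fixup
 * records with alloca(), so the records live in the stack frame of the
 * function that expands them and remain valid only until it returns;
 * ia64_apply_fixes() is expected to consume them within that same frame.
 * The macros also assume locals named plt_fixes, ltoff_fixes and
 * plt_offset[] exist at the expansion site.
 */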

static inline void ia64_apply_fixes (uint8_t **gen_code_pp,
                                     struct ia64_fixup *ltoff_fixes,
                                     uint64_t gp,
                                     struct ia64_fixup *plt_fixes,
                                     int num_plts,
                                     unsigned long *plt_target,
                                     unsigned int *plt_offset)
{
    static const uint8_t plt_bundle[] = {
        0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; movl r1=GP */
        0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x60,

        0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; brl IP */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0
    };
    uint8_t *gen_code_ptr = *gen_code_pp, *plt_start, *got_start;
    uint64_t *vp;
    struct ia64_fixup *fixup;
    unsigned int offset = 0;
    struct fdesc {
        long ip;
        long gp;
    } *fdesc;
    int i;

    if (plt_fixes) {
        plt_start = gen_code_ptr;

        for (i = 0; i < num_plts; ++i) {
            if (plt_offset[i]) {
                plt_offset[i] = offset;
                offset += sizeof(plt_bundle);

                fdesc = (struct fdesc *) plt_target[i];
                memcpy(gen_code_ptr, plt_bundle, sizeof(plt_bundle));
                ia64_imm64 (gen_code_ptr + 0x02, fdesc->gp);
                ia64_imm60b(gen_code_ptr + 0x12,
                            (fdesc->ip - (long) (gen_code_ptr + 0x10)) >> 4);
                gen_code_ptr += sizeof(plt_bundle);
            }
        }

        for (fixup = plt_fixes; fixup; fixup = fixup->next)
            ia64_imm21b(fixup->addr,
                        ((long) plt_start + plt_offset[fixup->value]
                         - ((long) fixup->addr & ~0xf)) >> 4);
    }

    got_start = gen_code_ptr;

    /* First, create the GOT: */
    for (fixup = ltoff_fixes; fixup; fixup = fixup->next) {
        /* first check if we already have this value in the GOT: */
        for (vp = (uint64_t *) got_start; vp < (uint64_t *) gen_code_ptr; ++vp)
            if (*vp == fixup->value)
                break;
        if (vp == (uint64_t *) gen_code_ptr) {
            /* Nope, we need to put the value in the GOT: */
            *vp = fixup->value;
            gen_code_ptr += 8;
        }
        ia64_imm22(fixup->addr, (long) vp - gp);
    }
    /* Keep code ptr aligned. */
    if ((long) gen_code_ptr & 15)
        gen_code_ptr += 8;
    *gen_code_pp = gen_code_ptr;
}
#endif
#endif
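
/* Annotation (not in the original file): of the two #endif lines above, the
 * first closes "#ifdef __ia64" and the second closes the "#if 0" that
 * disables the whole legacy block.
 */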

#ifdef CONFIG_DYNGEN_OP

#if defined __hppa__
struct hppa_branch_stub {
    uint32_t *location;
    long target;
    struct hppa_branch_stub *next;
};

#define HPPA_RECORD_BRANCH(LIST, LOC, TARGET)                                 \
do {                                                                          \
    struct hppa_branch_stub *stub = alloca(sizeof(struct hppa_branch_stub));  \
    stub->location = LOC;                                                     \
    stub->target = TARGET;                                                    \
    stub->next = LIST;                                                        \
    LIST = stub;                                                              \
} while (0)

static inline void hppa_process_stubs(struct hppa_branch_stub *stub,
                                      uint8_t **gen_code_pp)
{
    uint32_t *s = (uint32_t *)*gen_code_pp;
    uint32_t *p = s + 1;

    if (!stub) return;

    for (; stub != NULL; stub = stub->next) {
        unsigned long l = (unsigned long)p;
        /* stub:
         * ldil L'target, %r1
         * be,n R'target(%sr4,%r1)
         */
        *p++ = 0x20200000 | reassemble_21(lrsel(stub->target, 0));
        *p++ = 0xe0202002 | (reassemble_17(rrsel(stub->target, 0) >> 2));
        hppa_patch17f(stub->location, l, 0);
    }
    /* b,l,n stub,%r0 */
    *s = 0xe8000002 | reassemble_17((p - s) - 2);
    *gen_code_pp = (uint8_t *)p;
}
#endif /* __hppa__ */
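
/* Annotation (not in the original file): PA-RISC branches have a short
 * displacement field, so a branch whose target is out of range is routed
 * through a stub: ldil loads the left (high) portion of the target address
 * into %r1 and "be,n R'target(%sr4,%r1)" completes the jump.
 * HPPA_RECORD_BRANCH queues each out-of-range site; hppa_process_stubs()
 * emits the stubs after the op's code, repoints each recorded branch at its
 * stub via hppa_patch17f(), and writes a leading "b,l,n" so straight-line
 * execution skips the stub area.  reassemble_17/21, lrsel/rrsel and
 * hppa_patch17f are defined elsewhere in the HPPA host support code.
 */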

const TCGArg *dyngen_op(TCGContext *s, int opc, const TCGArg *opparam_ptr)
{
    uint8_t *gen_code_ptr;

#ifdef __hppa__
    struct hppa_branch_stub *hppa_stubs = NULL;
#endif

    gen_code_ptr = s->code_ptr;
    switch(opc) {

/* op.h is dynamically generated by dyngen.c from op.c */
#include "op.h"

    default:
        tcg_abort();
    }

#ifdef __hppa__
    hppa_process_stubs(hppa_stubs, &gen_code_ptr);
#endif

    s->code_ptr = gen_code_ptr;
    return opparam_ptr;
}
#endif
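
/* Annotation (not in the original file): dyngen_op() is the glue between
 * TCG and the legacy dyngen backend.  Each case of the generated op.h
 * copies the precompiled body of one micro-op to gen_code_ptr, patches its
 * __op_param/__op_jmp slots, and advances opparam_ptr past the consumed
 * arguments.  A sketch of what one generated case might look like
 * (illustrative only; the real op.h depends on the host and on dyngen's
 * analysis of the compiled op.c):
 *
 *     case INDEX_op_movl_T0_im:
 *         memcpy(gen_code_ptr, (const void *)op_movl_T0_im, 12);
 *         *(uint32_t *)(gen_code_ptr + 4) = opparam_ptr[0]; // __op_param1
 *         gen_code_ptr += 12;
 *         opparam_ptr += 1;
 *         break;
 */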