VirtualBox

source: vbox/trunk/src/recompiler/softmmu_template.h@ 62180

最後變更 在這個檔案從62180是 42601,由 vboxsync 提交於 12 年 前

REM: Initial changes to make it work (seemingly) with MinGW-w64.

  • 屬性 svn:eol-style 設為 native
檔案大小: 13.1 KB
 
1/*
2 * Software MMU support
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "qemu-timer.h"
30
31#define DATA_SIZE (1 << SHIFT)
32
33#if DATA_SIZE == 8
34#define SUFFIX q
35#define USUFFIX q
36#define DATA_TYPE uint64_t
37#ifdef VBOX
38# define DATA_TYPE_PROMOTED uint64_t
39#endif
40#elif DATA_SIZE == 4
41#define SUFFIX l
42#define USUFFIX l
43#define DATA_TYPE uint32_t
44#ifdef VBOX
45# define DATA_TYPE_PROMOTED RTCCUINTREG
46#endif
47#elif DATA_SIZE == 2
48#define SUFFIX w
49#define USUFFIX uw
50#define DATA_TYPE uint16_t
51#ifdef VBOX
52# define DATA_TYPE_PROMOTED RTCCUINTREG
53#endif
54#elif DATA_SIZE == 1
55#define SUFFIX b
56#define USUFFIX ub
57#define DATA_TYPE uint8_t
58#ifdef VBOX
59# define DATA_TYPE_PROMOTED RTCCUINTREG
60#endif
61#else
62#error unsupported data size
63#endif
64
65#ifdef SOFTMMU_CODE_ACCESS
66#define READ_ACCESS_TYPE 2
67#define ADDR_READ addr_code
68#else
69#define READ_ACCESS_TYPE 0
70#define ADDR_READ addr_read
71#endif
72
73static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
74 int mmu_idx,
75 void *retaddr);
76static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
77 target_ulong addr,
78 void *retaddr)
79{
80 DATA_TYPE res;
81 int index;
82 index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
83 physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
84 env->mem_io_pc = (uintptr_t)retaddr;
85 if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
86 && !can_do_io(env)) {
87 cpu_io_recompile(env, retaddr);
88 }
89
90 env->mem_io_vaddr = addr;
91#if SHIFT <= 2
92 res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
93#else
94#ifdef TARGET_WORDS_BIGENDIAN
95 res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
96 res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
97#else
98 res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
99 res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
100#endif
101#endif /* SHIFT > 2 */
102 return res;
103}
104
105/* handle all cases except unaligned access which span two pages */
106#ifndef VBOX
107DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
108 int mmu_idx)
109#else
110/* Load helpers invoked from generated code, and TCG makes an assumption
111 that valid value takes the whole register, why gcc after 4.3 may
112 use only lower part of register for smaller types. So force promotion. */
113DATA_TYPE_PROMOTED REGPARM
114glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
115 int mmu_idx)
116#endif
117{
118 DATA_TYPE res;
119 int index;
120 target_ulong tlb_addr;
121 target_phys_addr_t ioaddr;
122 uintptr_t addend;
123 void *retaddr;
124
125 /* test if there is match for unaligned or IO access */
126 /* XXX: could done more in memory macro in a non portable way */
127 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
128 redo:
129 tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
130 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
131 if (tlb_addr & ~TARGET_PAGE_MASK) {
132 /* IO access */
133 if ((addr & (DATA_SIZE - 1)) != 0)
134 goto do_unaligned_access;
135 retaddr = GETPC();
136 ioaddr = env->iotlb[mmu_idx][index];
137 res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
138 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
139 /* slow unaligned access (it spans two pages or IO) */
140 do_unaligned_access:
141 retaddr = GETPC();
142#ifdef ALIGNED_ONLY
143 do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
144#endif
145 res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
146 mmu_idx, retaddr);
147 } else {
148 /* unaligned/aligned access in the same page */
149#ifdef ALIGNED_ONLY
150 if ((addr & (DATA_SIZE - 1)) != 0) {
151 retaddr = GETPC();
152 do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
153 }
154#endif
155 addend = env->tlb_table[mmu_idx][index].addend;
156 res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(uintptr_t)(addr+addend));
157 }
158 } else {
159 /* the page is not in the TLB : fill it */
160 retaddr = GETPC();
161#ifdef ALIGNED_ONLY
162 if ((addr & (DATA_SIZE - 1)) != 0)
163 do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
164#endif
165 tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
166 goto redo;
167 }
168 return res;
169}
170
171/* handle all unaligned cases */
172static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
173 int mmu_idx,
174 void *retaddr)
175{
176 DATA_TYPE res, res1, res2;
177 int index, shift;
178 target_phys_addr_t ioaddr;
179 uintptr_t addend;
180 target_ulong tlb_addr, addr1, addr2;
181
182 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
183 redo:
184 tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
185 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
186 if (tlb_addr & ~TARGET_PAGE_MASK) {
187 /* IO access */
188 if ((addr & (DATA_SIZE - 1)) != 0)
189 goto do_unaligned_access;
190 ioaddr = env->iotlb[mmu_idx][index];
191 res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
192 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
193 do_unaligned_access:
194 /* slow unaligned access (it spans two pages) */
195 addr1 = addr & ~(DATA_SIZE - 1);
196 addr2 = addr1 + DATA_SIZE;
197 res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
198 mmu_idx, retaddr);
199 res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
200 mmu_idx, retaddr);
201 shift = (addr & (DATA_SIZE - 1)) * 8;
202#ifdef TARGET_WORDS_BIGENDIAN
203 res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
204#else
205 res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
206#endif
207 res = (DATA_TYPE)res;
208 } else {
209 /* unaligned/aligned access in the same page */
210 addend = env->tlb_table[mmu_idx][index].addend;
211 res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(uintptr_t)(addr+addend));
212 }
213 } else {
214 /* the page is not in the TLB : fill it */
215 tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
216 goto redo;
217 }
218 return res;
219}
220
221#ifndef SOFTMMU_CODE_ACCESS
222
223static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
224 DATA_TYPE val,
225 int mmu_idx,
226 void *retaddr);
227
228static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
229 DATA_TYPE val,
230 target_ulong addr,
231 void *retaddr)
232{
233 int index;
234 index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
235 physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
236 if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
237 && !can_do_io(env)) {
238 cpu_io_recompile(env, retaddr);
239 }
240
241 env->mem_io_vaddr = addr;
242 env->mem_io_pc = (uintptr_t)retaddr;
243#if SHIFT <= 2
244 io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
245#else
246#ifdef TARGET_WORDS_BIGENDIAN
247 io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
248 io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
249#else
250 io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
251 io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
252#endif
253#endif /* SHIFT > 2 */
254}
255
256void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
257 DATA_TYPE val,
258 int mmu_idx)
259{
260 target_phys_addr_t ioaddr;
261 uintptr_t addend;
262 target_ulong tlb_addr;
263 void *retaddr;
264 int index;
265
266 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
267 redo:
268 tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
269 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
270 if (tlb_addr & ~TARGET_PAGE_MASK) {
271 /* IO access */
272 if ((addr & (DATA_SIZE - 1)) != 0)
273 goto do_unaligned_access;
274 retaddr = GETPC();
275 ioaddr = env->iotlb[mmu_idx][index];
276 glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
277 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
278 do_unaligned_access:
279 retaddr = GETPC();
280#ifdef ALIGNED_ONLY
281 do_unaligned_access(addr, 1, mmu_idx, retaddr);
282#endif
283 glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
284 mmu_idx, retaddr);
285 } else {
286 /* aligned/unaligned access in the same page */
287#ifdef ALIGNED_ONLY
288 if ((addr & (DATA_SIZE - 1)) != 0) {
289 retaddr = GETPC();
290 do_unaligned_access(addr, 1, mmu_idx, retaddr);
291 }
292#endif
293 addend = env->tlb_table[mmu_idx][index].addend;
294 glue(glue(st, SUFFIX), _raw)((uint8_t *)(uintptr_t)(addr+addend), val);
295 }
296 } else {
297 /* the page is not in the TLB : fill it */
298 retaddr = GETPC();
299#ifdef ALIGNED_ONLY
300 if ((addr & (DATA_SIZE - 1)) != 0)
301 do_unaligned_access(addr, 1, mmu_idx, retaddr);
302#endif
303 tlb_fill(addr, 1, mmu_idx, retaddr);
304 goto redo;
305 }
306}
307
308/* handles all unaligned cases */
309static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
310 DATA_TYPE val,
311 int mmu_idx,
312 void *retaddr)
313{
314 target_phys_addr_t ioaddr;
315 uintptr_t addend;
316 target_ulong tlb_addr;
317 int index, i;
318
319 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
320 redo:
321 tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
322 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
323 if (tlb_addr & ~TARGET_PAGE_MASK) {
324 /* IO access */
325 if ((addr & (DATA_SIZE - 1)) != 0)
326 goto do_unaligned_access;
327 ioaddr = env->iotlb[mmu_idx][index];
328 glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
329 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
330 do_unaligned_access:
331 /* XXX: not efficient, but simple */
332 /* Note: relies on the fact that tlb_fill() does not remove the
333 * previous page from the TLB cache. */
334 for(i = DATA_SIZE - 1; i >= 0; i--) {
335#ifdef TARGET_WORDS_BIGENDIAN
336 glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
337 mmu_idx, retaddr);
338#else
339 glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
340 mmu_idx, retaddr);
341#endif
342 }
343 } else {
344 /* aligned/unaligned access in the same page */
345 addend = env->tlb_table[mmu_idx][index].addend;
346 glue(glue(st, SUFFIX), _raw)((uint8_t *)(uintptr_t)(addr+addend), val);
347 }
348 } else {
349 /* the page is not in the TLB : fill it */
350 tlb_fill(addr, 1, mmu_idx, retaddr);
351 goto redo;
352 }
353}
354
355#endif /* !defined(SOFTMMU_CODE_ACCESS) */
356
357#ifdef VBOX
358# undef DATA_TYPE_PROMOTED
359#endif
360#undef READ_ACCESS_TYPE
361#undef SHIFT
362#undef DATA_TYPE
363#undef SUFFIX
364#undef USUFFIX
365#undef DATA_SIZE
366#undef ADDR_READ
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette