VirtualBox

source: vbox/trunk/src/recompiler_new/softmmu_template.h@ 13462

Last change on this file since 13462 was 13382, checked in by vboxsync, 16 years ago

more MSVC-related stuff

  • Property svn:eol-style set to native
File size: 13.0 KB
 
/*
 * Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
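/*
 * This header is a template: it is included once per access size with SHIFT
 * (0..3) and MMUSUFFIX defined by the includer, and the macros above select
 * the matching C type and the suffix used to build the helper names.  An
 * illustrative inclusion pattern (the including file is not part of this
 * listing) looks like:
 *
 *   #define MMUSUFFIX _mmu
 *   #define SHIFT 0
 *   #include "softmmu_template.h"
 *   #define SHIFT 1
 *   #include "softmmu_template.h"
 *   #define SHIFT 2
 *   #include "softmmu_template.h"
 *   #define SHIFT 3
 *   #include "softmmu_template.h"
 */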

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
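/*
 * Code fetches (SOFTMMU_CODE_ACCESS) probe the TLB through the addr_code
 * field and report access type 2 to tlb_fill(); ordinary data reads use
 * addr_read and access type 0.
 */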

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
#ifndef VBOX
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
#else
DECLINLINE(DATA_TYPE) glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                            target_ulong addr,
                                            void *retaddr)
#endif
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
    return res;
}

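/*
 * Generated load helper, __ld<SUFFIX><MMUSUFFIX> (e.g. __ldl_mmu for a 32-bit
 * load when MMUSUFFIX is _mmu): the virtual address selects a TLB entry; when
 * the page tag matches and no flag bits are set in the low bits of the entry,
 * the value is read directly from host memory through the cached addend.
 * Otherwise the access is redirected to the IO, unaligned or tlb_fill() paths
 * below.
 */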
/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is match for unaligned or IO access */
    /* XXX: could be done more in a memory macro in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

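/*
 * The load slow path resolves an access that crosses a page boundary by
 * performing two aligned DATA_SIZE loads (recursing into itself, so each half
 * can independently hit RAM, IO or a TLB miss) and merging them with shifts
 * according to the target byte order.
 */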
/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS
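/* Store helpers are only generated for data accesses; the code-fetch
 * instantiation of this template provides loads only. */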

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

#ifndef VBOX
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
#else
DECLINLINE(void) glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                        DATA_TYPE val,
                                        target_ulong addr,
                                        void *retaddr)
#endif
223{
224 int index;
225 index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
226 physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
227 if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
228 && !can_do_io(env)) {
229 cpu_io_recompile(env, retaddr);
230 }
231
232 env->mem_io_vaddr = addr;
233 env->mem_io_pc = (unsigned long)retaddr;
234#if SHIFT <= 2
235 io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
236#else
237#ifdef TARGET_WORDS_BIGENDIAN
238 io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
239 io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
240#else
241 io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
242 io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
243#endif
244#endif /* SHIFT > 2 */
245#ifdef USE_KQEMU
246 env->last_io_time = cpu_get_time_fast();
247#endif
248}
249
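/*
 * Generated store helper, __st<SUFFIX><MMUSUFFIX>: same TLB fast path as the
 * load helper above.  Pages that still need dirty tracking keep flag bits set
 * in the low bits of addr_write (e.g. IO_MEM_NOTDIRTY), so stores to them take
 * the IO path instead of being written directly through the cached addend.
 */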
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

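/*
 * Unlike the load slow path, the store slow path falls back to single byte
 * stores (slow_stb) for accesses that cross a page boundary, so every byte
 * goes through the full TLB/IO handling on its own.
 */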
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

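/* Drop the size-specific macros so this template can be included again with a
 * different SHIFT. */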
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ