VirtualBox

source: vbox/trunk/src/recompiler/softmmu_template.h@ 25722

Last change on this file since 25722 was 15173, checked in by vboxsync, 16 years ago

an attempt to fix MacOS alignment issues

  • Property svn:eol-style set to native
File size: 13.6 KB
 
/*
 * Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#define DATA_TYPE_PROMOTED uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#else
#error unsupported data size
#endif
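
/* This file is a template: the including file defines SHIFT (0..3) and
 * MMUSUFFIX and includes it once per access size. glue() pastes the
 * suffixes above onto the helper names, so e.g. SHIFT == 2 with MMUSUFFIX
 * defined as _mmu produces __ldl_mmu and __stl_mmu. */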

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
#ifndef VBOX
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
#else
DECLINLINE(DATA_TYPE) glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                            target_ulong addr,
                                            void *retaddr)
#endif
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

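    /* io_mem_read[index][SHIFT] is the device read callback for accesses of
     * 1 << SHIFT bytes. Handlers are registered only for 1, 2 and 4 byte
     * accesses, so a 64-bit access is split into two 32-bit reads whose
     * order depends on the target endianness. */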
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
#ifndef VBOX
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
#else
/* Load helpers invoked from generated code. TCG assumes that a valid
   value occupies the whole register, whereas gcc 4.3 and later may use
   only the lower part of the register for smaller types; so force
   promotion of the return type. */
DATA_TYPE_PROMOTED REGPARM
glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                    int mmu_idx)
#endif
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is a match for an unaligned or IO access */
    /* XXX: could be done more efficiently in a memory macro, in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
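    /* The softmmu TLB is direct-mapped: the virtual page number modulo
     * CPU_TLB_SIZE selects the entry. Flag bits live in the low bits of the
     * entry's address field; including TLB_INVALID_MASK in the comparison
     * below makes invalidated entries never match, and any other non-page
     * bit left set routes the access through the I/O path. */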
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
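            /* GETPC() captures the host return address of this helper call
             * inside the generated code; the I/O path and tlb_fill() use it
             * to map a fault or I/O access back to the guest instruction
             * that caused it. */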
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
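            /* Recombine the two aligned loads. shift is the byte offset of
             * addr within the aligned datum, in bits; e.g. a little-endian
             * 32-bit load at offset 1 gives shift == 8 and
             * res = (res1 >> 8) | (res2 << 24): the top three bytes of res1
             * joined with the lowest byte of res2. */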
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

#ifndef VBOX
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
#else
DECLINLINE(void) glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                        DATA_TYPE val,
                                        target_ulong addr,
                                        void *retaddr)
#endif
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
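            /* Store byte by byte: on little-endian targets byte i of the
             * datum is (val >> (i * 8)) & 0xff; big-endian targets mirror
             * the order. Each slow_stb call performs its own TLB lookup,
             * so the bytes may land on two different pages. */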
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#ifdef VBOX
#undef DATA_TYPE_PROMOTED
#endif
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ