VirtualBox

source: vbox/trunk/src/recompiler/softmmu_template.h@ 37101

Last change on this file since 37101 was 36175, checked in by vboxsync, 14 years ago

rem: Synced up to v0.11.1 (35bfc7324e2e6946c4113ada5db30553a1a7c40b) from git://git.savannah.nongnu.org/qemu.git.

  • Property svn:eol-style set to native
File size: 13.1 KB
 
1/*
2 * Software MMU support
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
/*
 * Template parameters.  This header is #included several times, each time
 * with SHIFT defined by the includer to select the access size:
 * SHIFT 0/1/2/3 -> 1/2/4/8-byte accesses.  SUFFIX/USUFFIX pick the
 * b/w/l/q helper-name suffixes and the signed/unsigned raw-load variants.
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#ifdef VBOX
/* VBox addition: return type widened to a full host register so TCG sees
   the whole register defined (see the __ld helper below). */
# define DATA_TYPE_PROMOTED uint64_t
#endif
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#else
#error unsupported data size
#endif
62
/* When instantiated for code fetches (SOFTMMU_CODE_ACCESS), loads use
   access type 2 (instruction fetch) and go through the addr_code TLB
   field; otherwise access type 0 (data load) through addr_read. */
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
70
/* Forward declaration: slow path for loads that are unaligned or that
   need a TLB refill (defined below). */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);

/*
 * Perform an MMIO read of DATA_SIZE bytes.
 *
 * physaddr - IO-TLB entry value: the low bits select the io_mem handler
 *            slot, and the page-masked part combined with the virtual
 *            address recovers the offset within the page.
 * addr     - guest virtual address of the access.
 * retaddr  - host return address into the generated code, recorded for
 *            possible retranslation of the faulting TB.
 */
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    /* NOTE(review): cast truncates pointers on LLP64 hosts where long is
       32-bit; later QEMU uses uintptr_t here — confirm supported hosts. */
    env->mem_io_pc = (unsigned long)retaddr;
    /* Accesses to real devices (handler index beyond NOTDIRTY) can have
       side effects; if IO is not currently permitted, retranslate the TB
       so that this access ends it. */
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
    /* 64-bit access: io_mem handlers only go up to 32 bits, so issue two
       32-bit reads ordered according to target endianness. */
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef CONFIG_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
    return res;
}
105
/* handle all cases except unaligned access which span two pages */
#ifndef VBOX
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
#else
/* Load helpers invoked from generated code.  TCG assumes a valid value
   occupies the whole host register, whereas gcc after 4.3 may set only
   the lower part of the register for smaller types — so the return type
   is promoted to full register width. */
DATA_TYPE_PROMOTED REGPARM
glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                    int mmu_idx)
#endif
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is match for unaligned or IO access */
    /* XXX: could done more in memory macro in a non portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    /* Fast hit: the page tag matches and the entry is valid
       (TLB_INVALID_MASK is folded into the comparison). */
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            /* GETPC() must be evaluated in this outermost helper frame so
               it yields the return address into generated code. */
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            /* Direct RAM access: addend maps guest-virtual to host address.
               NOTE(review): the (long) cast truncates on LLP64 hosts; later
               QEMU uses uintptr_t — confirm supported host ABIs. */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
170
/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            /* Recursively load the two naturally-aligned halves and stitch
               the requested value together with shifts, honouring target
               endianness. */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res; /* truncate back to the access size */
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
220
#ifndef SOFTMMU_CODE_ACCESS
/* Store helpers are only generated for data-access instantiations. */

/* Forward declaration: slow path for stores that are unaligned or that
   need a TLB refill (defined below). */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

/*
 * Perform an MMIO write of DATA_SIZE bytes (mirror of io_read above).
 */
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    /* Real device writes may have side effects; if IO is not currently
       permitted, retranslate the TB so that this access ends it. */
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    /* NOTE(review): cast truncates pointers on LLP64 hosts; later QEMU
       uses uintptr_t here — confirm supported hosts. */
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
    /* 64-bit access: io_mem handlers only go up to 32 bits, so issue two
       32-bit writes ordered according to target endianness. */
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
#ifdef CONFIG_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
}
258
/*
 * Store helper invoked from generated code: handles all cases except an
 * unaligned access spanning two pages (access type 1 = data store).
 */
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    /* Fast hit: page tag matches and the entry is valid (TLB_INVALID_MASK
       is folded into the comparison). */
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            /* GETPC() must be evaluated in this outermost helper frame so
               it yields the return address into generated code. */
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            /* Direct RAM store.  NOTE(review): (long) cast truncates on
               LLP64 hosts; later QEMU uses uintptr_t — confirm host ABIs. */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
309
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
            /* Store byte by byte, highest address first, extracting each
               byte from val according to target endianness. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
355
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

/* Clean up all template parameters so this header can be re-included
   with a different SHIFT. */
#ifdef VBOX
# undef DATA_TYPE_PROMOTED
#endif
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette