/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"
#include "memory.h"

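/*
 * This header is a multiple-inclusion template: the includer defines
 * SHIFT (log2 of the access size, 0..3) and MMUSUFFIX before each
 * inclusion, and may define SOFTMMU_CODE_ACCESS to generate the code-load
 * variants.  A typical instantiation sequence looks roughly like this
 * (a sketch only -- the exact site and suffixes depend on the QEMU
 * version; see the includers named in the header comment above):
 *
 *   #define MMUSUFFIX _mmu
 *   #define SHIFT 0
 *   #include "softmmu_template.h"
 *   #define SHIFT 1
 *   #include "softmmu_template.h"
 *   #define SHIFT 2
 *   #include "softmmu_template.h"
 *   #define SHIFT 3
 *   #include "softmmu_template.h"
 */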
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

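/*
 * SUFFIX (b/w/l/q) names the generated helpers, USUFFIX selects the
 * zero-extending raw host accessor (e.g. ldub_raw/lduw_raw), and
 * DATA_TYPE is the C type of one access of DATA_SIZE bytes.
 */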
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

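/*
 * With SOFTMMU_CODE_ACCESS defined, the generated loads are instruction
 * fetches: they go through the addr_code TLB field and report access
 * type 2 to tlb_fill()/do_unaligned_access().  Data loads use addr_read
 * and access type 0; the store helpers below always pass access type 1.
 */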
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                        target_ulong addr,
                                                        int mmu_idx,
                                                        uintptr_t retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    DATA_TYPE res;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(mr, physaddr, 4) << 32;
    res |= io_mem_read(mr, physaddr + 4, 4);
#else
    res = io_mem_read(mr, physaddr, 4);
    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

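/*
 * Load helper called from generated TCG code.  The TLB tag for the page
 * containing addr is compared against env->tlb_table[mmu_idx][index];
 * on a miss, tlb_fill() refills the entry and the lookup is retried via
 * the redo label.  On a hit, either the host address (addr + addend) is
 * dereferenced directly, or an I/O read is performed through the iotlb
 * entry, or the access is punted to the slow path when it crosses a
 * page boundary.
 */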
/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    uintptr_t retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: could be done more in the memory macro in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

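/*
 * Slow-path load: an access that spans two pages is split into two
 * naturally aligned loads of DATA_SIZE bytes at addr1 and addr2, which
 * are then recombined with shifts according to the target endianness.
 */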
/* handle all unaligned cases */
static DATA_TYPE
glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                       target_ulong addr,
                                       int mmu_idx,
                                       uintptr_t retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr);

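/*
 * Store counterpart of io_read above: resolves the MemoryRegion from the
 * iotlb value, records the guest virtual address and return PC in
 * env->mem_io_vaddr/env->mem_io_pc, and issues a single write of
 * 1 << SHIFT bytes (two 4-byte writes for 64-bit accesses).
 */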
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
#if SHIFT <= 2
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(mr, physaddr, (val >> 32), 4);
    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(mr, physaddr, (uint32_t)val, 4);
    io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}

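/*
 * Store helper called from generated TCG code.  Mirrors the load helper
 * above, but looks up the addr_write TLB field and the iotlb entry, and
 * passes access type 1 (write) to do_unaligned_access()/tlb_fill().
 */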
void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                              target_ulong addr, DATA_TYPE val,
                                              int mmu_idx)
{
    target_phys_addr_t ioaddr;
    target_ulong tlb_addr;
    uintptr_t retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

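/*
 * Slow-path store: a cross-page access is decomposed into single-byte
 * stores through the SHIFT == 0 instantiation (slow_stb), so each byte
 * takes the normal TLB lookup path on its own page.
 */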
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr)
{
    target_phys_addr_t ioaddr;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

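/* Clean up the per-instantiation macros so this header can be included
   again with a different SHIFT. */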
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ