/*
 * TI OMAP general purpose memory controller emulation.
 *
 * Copyright (C) 2007-2009 Nokia Corporation
 * Original code written by Andrzej Zaborowski <andrew@openedhand.com>
 * Enhancements for OMAP3 and NAND support written by Juha Riihimäki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) any later version of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/block/flash.h"
#include "hw/arm/omap.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

/* General-Purpose Memory Controller */
struct omap_gpmc_s {
    qemu_irq irq;
    qemu_irq drq;
    MemoryRegion iomem;
    int accept_256;

    uint8_t revision;
    uint8_t sysconfig;
    uint16_t irqst;
    uint16_t irqen;
    uint16_t lastirq;
    uint16_t timeout;
    uint16_t config;
    struct omap_gpmc_cs_file_s {
        uint32_t config[7];
        MemoryRegion *iomem;
        MemoryRegion container;
        MemoryRegion nandiomem;
        DeviceState *dev;
    } cs_file[8];
    int ecc_cs;
    int ecc_ptr;
    uint32_t ecc_cfg;
    ECCState ecc[9];
    struct prefetch {
        uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */
        uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */
        int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */
        int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */
        int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */
        MemoryRegion iomem;
        uint8_t fifo[64];
    } prefetch;
};

#define OMAP_GPMC_8BIT 0
#define OMAP_GPMC_16BIT 1
#define OMAP_GPMC_NOR 0
#define OMAP_GPMC_NAND 2

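/* Return the DEVICETYPE field of CONFIG1 for this chip-select
 * (OMAP_GPMC_NOR or OMAP_GPMC_NAND)
 */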
static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f)
{
    return (f->config[0] >> 10) & 3;
}

static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f)
{
    /* devsize field is really 2 bits but we ignore the high
     * bit to ensure consistent behaviour if the guest sets
     * it (values 2 and 3 are reserved in the TRM)
     */
    return (f->config[0] >> 12) & 1;
}

/* Extract the chip-select value from the prefetch config1 register */
static int prefetch_cs(uint32_t config1)
{
    return (config1 >> 24) & 7;
}

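/* Extract the FIFOTHRESHOLD value from the prefetch config1 register */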
static int prefetch_threshold(uint32_t config1)
{
    return (config1 >> 8) & 0x7f;
}

static void omap_gpmc_int_update(struct omap_gpmc_s *s)
{
    /* The TRM is a bit unclear, but it seems to say that
     * the TERMINALCOUNTSTATUS bit is set only on the
     * transition when the prefetch engine goes from
     * active to inactive, whereas the FIFOEVENTSTATUS
     * bit is held high as long as the fifo has at
     * least THRESHOLD bytes available.
     * So we do the latter here, but TERMINALCOUNTSTATUS
     * is set elsewhere.
     */
    if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) {
        s->irqst |= 1;
    }
    if ((s->irqen & s->irqst) != s->lastirq) {
        s->lastirq = s->irqen & s->irqst;
        qemu_set_irq(s->irq, s->lastirq);
    }
}

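/* Drive the DMA request line; only has any effect if the DMA mode
 * bit (bit 2) is set in PREFETCH_CONFIG1
 */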
static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value)
{
    if (s->prefetch.config1 & 4) {
        qemu_set_irq(s->drq, value);
    }
}

/* Access functions for when a NAND-like device is mapped into memory:
 * all addresses in the region behave like accesses to the relevant
 * GPMC_NAND_DATA_i register (which is actually implemented to call these)
 */
static uint64_t omap_nand_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
    uint64_t v;
    nand_setpins(f->dev, 0, 0, 0, 1, 0);
    switch (omap_gpmc_devsize(f)) {
    case OMAP_GPMC_8BIT:
        v = nand_getio(f->dev);
        if (size == 1) {
            return v;
        }
        v |= (nand_getio(f->dev) << 8);
        if (size == 2) {
            return v;
        }
        v |= (nand_getio(f->dev) << 16);
        v |= (nand_getio(f->dev) << 24);
        return v;
    case OMAP_GPMC_16BIT:
        v = nand_getio(f->dev);
        if (size == 1) {
            /* 8 bit read from 16 bit device : probably a guest bug */
            return v & 0xff;
        }
        if (size == 2) {
            return v;
        }
        v |= (nand_getio(f->dev) << 16);
        return v;
    default:
        abort();
    }
}

static void omap_nand_setio(DeviceState *dev, uint64_t value,
                            int nandsize, int size)
{
    /* Write the specified value to the NAND device, respecting
     * both size of the NAND device and size of the write access.
     */
    switch (nandsize) {
    case OMAP_GPMC_8BIT:
        switch (size) {
        case 1:
            nand_setio(dev, value & 0xff);
            break;
        case 2:
            nand_setio(dev, value & 0xff);
            nand_setio(dev, (value >> 8) & 0xff);
            break;
        case 4:
        default:
            nand_setio(dev, value & 0xff);
            nand_setio(dev, (value >> 8) & 0xff);
            nand_setio(dev, (value >> 16) & 0xff);
            nand_setio(dev, (value >> 24) & 0xff);
            break;
        }
        break;
    case OMAP_GPMC_16BIT:
        switch (size) {
        case 1:
            /* writing to a 16bit device with 8bit access is probably a guest
             * bug; pass the value through anyway.
             */
        case 2:
            nand_setio(dev, value & 0xffff);
            break;
        case 4:
        default:
            nand_setio(dev, value & 0xffff);
            nand_setio(dev, (value >> 16) & 0xffff);
            break;
        }
        break;
    }
}

static void omap_nand_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
    nand_setpins(f->dev, 0, 0, 0, 1, 0);
    omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
}

static const MemoryRegionOps omap_nand_ops = {
    .read = omap_nand_read,
    .write = omap_nand_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void fill_prefetch_fifo(struct omap_gpmc_s *s)
{
    /* Fill the prefetch FIFO by reading data from NAND.
     * We do this synchronously, unlike the hardware which
     * will do this asynchronously. We refill when the
     * FIFO has THRESHOLD bytes free, and we always refill
     * as much data as possible starting at the top end
     * of the FIFO.
     * (We have to refill at THRESHOLD rather than waiting
     * for the FIFO to empty to allow for the case where
     * the FIFO size isn't an exact multiple of THRESHOLD
     * and we're doing DMA transfers.)
     * This means we never need to handle wrap-around in
     * the fifo-reading code, and the next byte of data
     * to read is always fifo[63 - fifopointer].
     */
    int fptr;
    int cs = prefetch_cs(s->prefetch.config1);
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    int bytes;
    /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE
     * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND.
     * Instead believe the bit that says it is always a byte count.
     */
    bytes = 64 - s->prefetch.fifopointer;
    if (bytes > s->prefetch.count) {
        bytes = s->prefetch.count;
    }
    if (is16bit) {
        bytes &= ~1;
    }

    s->prefetch.count -= bytes;
    s->prefetch.fifopointer += bytes;
    fptr = 64 - s->prefetch.fifopointer;
    /* Move the existing data in the FIFO so it sits just
     * before what we're about to read in
     */
    while (fptr < (64 - bytes)) {
        s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes];
        fptr++;
    }
    while (fptr < 64) {
        if (is16bit) {
            uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2);
            s->prefetch.fifo[fptr++] = v & 0xff;
            s->prefetch.fifo[fptr++] = (v >> 8) & 0xff;
        } else {
            s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1);
        }
    }
    if (s->prefetch.startengine && (s->prefetch.count == 0)) {
        /* This was the final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    /* If there are any bytes in the FIFO at this point then
     * we must raise a DMA request (either this is a final part
     * transfer, or we filled the FIFO in which case we certainly
     * have THRESHOLD bytes available)
     */
    if (s->prefetch.fifopointer != 0) {
        omap_gpmc_dma_update(s, 1);
    }
    omap_gpmc_int_update(s);
}

/* Access functions for a NAND-like device when the prefetch/postwrite
 * engine is enabled -- all addresses in the region behave alike:
 * data is read or written to the FIFO.
 */
static uint64_t omap_gpmc_prefetch_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    uint32_t data;
    if (s->prefetch.config1 & 1) {
        /* The TRM doesn't define the behaviour if you read from the
         * FIFO when the prefetch engine is in write mode. We choose
         * to always return zero.
         */
        return 0;
    }
    /* Note that trying to read an empty fifo repeats the last byte */
    if (s->prefetch.fifopointer) {
        s->prefetch.fifopointer--;
    }
    data = s->prefetch.fifo[63 - s->prefetch.fifopointer];
    if (s->prefetch.fifopointer ==
        (64 - prefetch_threshold(s->prefetch.config1))) {
        /* We've drained THRESHOLD bytes now. So deassert the
         * DMA request, then refill the FIFO (which will probably
         * assert it again.)
         */
        omap_gpmc_dma_update(s, 0);
        fill_prefetch_fifo(s);
    }
    omap_gpmc_int_update(s);
    return data;
}

static void omap_gpmc_prefetch_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs = prefetch_cs(s->prefetch.config1);
    if ((s->prefetch.config1 & 1) == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO when the prefetch engine is in read mode. We
         * choose to ignore the write.
         */
        return;
    }
    if (s->prefetch.count == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO if the transfer is complete. We choose to ignore.
         */
        return;
    }
    /* The only reason we do any data buffering in postwrite
     * mode is if we are talking to a 16 bit NAND device, in
     * which case we need to buffer the first byte of the
     * 16 bit word until the other byte arrives.
     */
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    if (is16bit) {
        /* fifopointer alternates between 64 (waiting for first
         * byte of word) and 63 (waiting for second byte)
         */
        if (s->prefetch.fifopointer == 64) {
            s->prefetch.fifo[0] = value;
            s->prefetch.fifopointer--;
        } else {
            value = (value << 8) | s->prefetch.fifo[0];
            omap_nand_write(&s->cs_file[cs], 0, value, 2);
            s->prefetch.count--;
            s->prefetch.fifopointer = 64;
        }
    } else {
        /* Just write the byte : fifopointer remains 64 at all times */
        omap_nand_write(&s->cs_file[cs], 0, value, 1);
        s->prefetch.count--;
    }
    if (s->prefetch.count == 0) {
        /* Final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    omap_gpmc_int_update(s);
}

static const MemoryRegionOps omap_prefetch_ops = {
    .read = omap_gpmc_prefetch_read,
    .write = omap_gpmc_prefetch_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 1,
    .impl.max_access_size = 1,
};

static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs)
{
    /* Return the MemoryRegion* to map/unmap for this chipselect */
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) {
        return f->iomem;
    }
    if ((s->prefetch.config1 & 0x80) &&
        (prefetch_cs(s->prefetch.config1) == cs)) {
        /* The prefetch engine is enabled for this CS: map the FIFO */
        return &s->prefetch.iomem;
    }
    return &f->nandiomem;
}

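/* Map chip-select cs into the system address space, according to the
 * base address, mask and CSVALID fields of its CONFIG7 register
 * (a no-op if nothing is attached or CSVALID is clear)
 */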
static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    uint32_t mask = (f->config[6] >> 8) & 0xf;
    uint32_t base = f->config[6] & 0x3f;
    uint32_t size;

    if (!f->iomem && !f->dev) {
        return;
    }

    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }

    /* TODO: check for overlapping regions and report access errors */
    if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf
        && !(s->accept_256 && !mask)) {
        fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n",
                __func__, mask);
    }

    base <<= 24;
    size = (0x0fffffff & ~(mask << 24)) + 1;
    /* TODO: rather than setting the size of the mapping (which should be
     * constant), the mask should cause wrapping of the address space, so
     * that the same memory becomes accessible at every <i>size</i> bytes
     * starting from <i>base</i>. */
    memory_region_init(&f->container, NULL, "omap-gpmc-file", size);
    memory_region_add_subregion(&f->container, 0,
                                omap_gpmc_cs_memregion(s, cs));
    memory_region_add_subregion(get_system_memory(), base,
                                &f->container);
}

static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }
    if (!f->iomem && !f->dev) {
        return;
    }
    memory_region_del_subregion(get_system_memory(), &f->container);
    memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs));
    object_unparent(OBJECT(&f->container));
}

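/* Reset the GPMC to its power-on register state; only chip-select 0
 * comes out of reset with CSVALID set and mapped
 */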
void omap_gpmc_reset(struct omap_gpmc_s *s)
{
    int i;

    s->sysconfig = 0;
    s->irqst = 0;
    s->irqen = 0;
    omap_gpmc_int_update(s);
    for (i = 0; i < 8; i++) {
        /* This has to happen before we change any of the config
         * used to determine which memory regions are mapped or unmapped.
         */
        omap_gpmc_cs_unmap(s, i);
    }
    s->timeout = 0;
    s->config = 0xa00;
    s->prefetch.config1 = 0x00004000;
    s->prefetch.transfercount = 0x00000000;
    s->prefetch.startengine = 0;
    s->prefetch.fifopointer = 0;
    s->prefetch.count = 0;
    for (i = 0; i < 8; i ++) {
        s->cs_file[i].config[1] = 0x101001;
        s->cs_file[i].config[2] = 0x020201;
        s->cs_file[i].config[3] = 0x10031003;
        s->cs_file[i].config[4] = 0x10f1111;
        s->cs_file[i].config[5] = 0;
        s->cs_file[i].config[6] = 0xf00;
        /* In theory we could probe attached devices for some CFG1
         * bits here, but we just retain them across resets as they
         * were set initially by omap_gpmc_attach().
         */
        if (i == 0) {
            s->cs_file[i].config[0] &= 0x00433e00;
            s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */
            omap_gpmc_cs_map(s, i);
        } else {
            s->cs_file[i].config[0] &= 0x00403c00;
        }
    }
    s->ecc_cs = 0;
    s->ecc_ptr = 0;
    s->ecc_cfg = 0x3fcff000;
    for (i = 0; i < 9; i ++)
        ecc_reset(&s->ecc[i]);
}

static int gpmc_wordaccess_only(hwaddr addr)
{
    /* Return true if the register offset is to a register that
     * only permits word width accesses.
     * Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND
     * for any chipselect.
     */
    if (addr >= 0x60 && addr <= 0x1d4) {
        int cs = (addr - 0x60) / 0x30;
        addr -= cs * 0x30;
        if (addr >= 0x7c && addr < 0x88) {
            /* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */
            return 0;
        }
    }
    return 1;
}

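/* MMIO read handler for the GPMC register window; reads of the NAND data
 * registers are forwarded to the attached NAND device
 */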
static uint64_t omap_gpmc_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        return omap_badwidth_read32(opaque, addr);
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
        return s->revision;

    case 0x010: /* GPMC_SYSCONFIG */
        return s->sysconfig;

    case 0x014: /* GPMC_SYSSTATUS */
        return 1; /* RESETDONE */

    case 0x018: /* GPMC_IRQSTATUS */
        return s->irqst;

    case 0x01c: /* GPMC_IRQENABLE */
        return s->irqen;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        return s->timeout;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        return 0;

    case 0x050: /* GPMC_CONFIG */
        return s->config;

    case 0x054: /* GPMC_STATUS */
        return 0x001;

    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60: /* GPMC_CONFIG1 */
            return f->config[0];
        case 0x64: /* GPMC_CONFIG2 */
            return f->config[1];
        case 0x68: /* GPMC_CONFIG3 */
            return f->config[2];
        case 0x6c: /* GPMC_CONFIG4 */
            return f->config[3];
        case 0x70: /* GPMC_CONFIG5 */
            return f->config[4];
        case 0x74: /* GPMC_CONFIG6 */
            return f->config[5];
        case 0x78: /* GPMC_CONFIG7 */
            return f->config[6];
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                return omap_nand_read(f, 0, size);
            }
            return 0;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        return s->prefetch.config1;
    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        return s->prefetch.transfercount;
    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        return s->prefetch.startengine;
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
        /* NB: The OMAP3 TRM is inconsistent about whether the GPMC
         * FIFOTHRESHOLDSTATUS bit should be set when
         * FIFOPOINTER > FIFOTHRESHOLD or when it is >= FIFOTHRESHOLD.
         * Apparently the underlying functional spec from which the TRM was
         * created states that the behaviour is ">=", and this also
         * makes more conceptual sense.
         */
        return (s->prefetch.fifopointer << 24) |
               ((s->prefetch.fifopointer >=
                 ((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) |
               s->prefetch.count;

    case 0x1f4: /* GPMC_ECC_CONFIG */
        return s->ecc_cs;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        return s->ecc_ptr;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        return s->ecc_cfg;
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
        cs = (addr & 0x1f) >> 2;
        /* TODO: check correctness */
        return
            ((s->ecc[cs].cp & 0x07) << 0) |
            ((s->ecc[cs].cp & 0x38) << 13) |
            ((s->ecc[cs].lp[0] & 0x1ff) << 3) |
            ((s->ecc[cs].lp[1] & 0x1ff) << 19);

    case 0x230: /* GPMC_TESTMODE_CTRL */
        return 0;
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        return 0x00000000;
    }

    OMAP_BAD_REG(addr);
    return 0;
}

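/* MMIO write handler for the GPMC register window; writes to the NAND
 * command/address/data registers are forwarded to the attached NAND device
 */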
static void omap_gpmc_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        omap_badwidth_write32(opaque, addr, value);
        return;
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
    case 0x014: /* GPMC_SYSSTATUS */
    case 0x054: /* GPMC_STATUS */
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        OMAP_RO_REG(addr);
        break;

    case 0x010: /* GPMC_SYSCONFIG */
        if ((value >> 3) == 0x3)
            fprintf(stderr, "%s: bad SDRAM idle mode %"PRIi64"\n",
                    __func__, value >> 3);
        if (value & 2)
            omap_gpmc_reset(s);
        s->sysconfig = value & 0x19;
        break;

    case 0x018: /* GPMC_IRQSTATUS */
        s->irqst &= ~value;
        omap_gpmc_int_update(s);
        break;

    case 0x01c: /* GPMC_IRQENABLE */
        s->irqen = value & 0xf03;
        omap_gpmc_int_update(s);
        break;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        s->timeout = value & 0x1ff1;
        break;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        break;

    case 0x050: /* GPMC_CONFIG */
        s->config = value & 0xf13;
        break;

    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60: /* GPMC_CONFIG1 */
            f->config[0] = value & 0xffef3e13;
            break;
        case 0x64: /* GPMC_CONFIG2 */
            f->config[1] = value & 0x001f1f8f;
            break;
        case 0x68: /* GPMC_CONFIG3 */
            f->config[2] = value & 0x001f1f8f;
            break;
        case 0x6c: /* GPMC_CONFIG4 */
            f->config[3] = value & 0x1f8f1f8f;
            break;
        case 0x70: /* GPMC_CONFIG5 */
            f->config[4] = value & 0x0f1f1f1f;
            break;
        case 0x74: /* GPMC_CONFIG6 */
            f->config[5] = value & 0x00000fcf;
            break;
        case 0x78: /* GPMC_CONFIG7 */
            if ((f->config[6] ^ value) & 0xf7f) {
                omap_gpmc_cs_unmap(s, cs);
                f->config[6] = value & 0x00000f7f;
                omap_gpmc_cs_map(s, cs);
            }
            break;
        case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */
                omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
            }
            break;
        case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */
                omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
            }
            break;
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                omap_nand_write(f, 0, value, size);
            }
            break;
        default:
            goto bad_reg;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        if (!s->prefetch.startengine) {
            uint32_t newconfig1 = value & 0x7f8f7fbf;
            uint32_t changed;
            changed = newconfig1 ^ s->prefetch.config1;
            if (changed & (0x80 | 0x7000000)) {
                /* Turning the engine on or off, or mapping it somewhere else.
                 * cs_map() and cs_unmap() check the prefetch config and
                 * overall CSVALID bits, so it is sufficient to unmap-and-map
                 * both the old cs and the new one. Note that we adhere to
                 * the "unmap/change config/map" order (and not unmap twice
                 * if newcs == oldcs), otherwise we'll try to delete the wrong
                 * memory region.
                 */
                int oldcs = prefetch_cs(s->prefetch.config1);
                int newcs = prefetch_cs(newconfig1);
                omap_gpmc_cs_unmap(s, oldcs);
                if (oldcs != newcs) {
                    omap_gpmc_cs_unmap(s, newcs);
                }
                s->prefetch.config1 = newconfig1;
                omap_gpmc_cs_map(s, oldcs);
                if (oldcs != newcs) {
                    omap_gpmc_cs_map(s, newcs);
                }
            } else {
                s->prefetch.config1 = newconfig1;
            }
        }
        break;

    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        if (!s->prefetch.startengine) {
            s->prefetch.transfercount = value & 0x3fff;
        }
        break;

    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        if (s->prefetch.startengine != (value & 1)) {
            s->prefetch.startengine = value & 1;
            if (s->prefetch.startengine) {
                /* Prefetch engine start */
                s->prefetch.count = s->prefetch.transfercount;
                if (s->prefetch.config1 & 1) {
                    /* Write */
                    s->prefetch.fifopointer = 64;
                } else {
                    /* Read */
                    s->prefetch.fifopointer = 0;
                    fill_prefetch_fifo(s);
                }
            } else {
                /* Prefetch engine forcibly stopped. The TRM
                 * doesn't define the behaviour if you do this.
                 * We clear the prefetch count, which means that
                 * we permit no more writes, and don't read any
                 * more data from NAND. The CPU can still drain
                 * the FIFO of unread data.
                 */
                s->prefetch.count = 0;
            }
            omap_gpmc_int_update(s);
        }
        break;

    case 0x1f4: /* GPMC_ECC_CONFIG */
        s->ecc_cs = 0x8f;
        break;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        if (value & (1 << 8))
            for (cs = 0; cs < 9; cs ++)
                ecc_reset(&s->ecc[cs]);
        s->ecc_ptr = value & 0xf;
        if (s->ecc_ptr == 0 || s->ecc_ptr > 9) {
            s->ecc_ptr = 0;
            s->ecc_cs &= ~1;
        }
        break;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        s->ecc_cfg = value & 0x3fcff1ff;
        break;
    case 0x230: /* GPMC_TESTMODE_CTRL */
        if (value & 7)
            fprintf(stderr, "%s: test mode enable attempt\n", __func__);
        break;

    default:
    bad_reg:
        OMAP_BAD_REG(addr);
        return;
    }
}

static const MemoryRegionOps omap_gpmc_ops = {
    .read = omap_gpmc_read,
    .write = omap_gpmc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

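/* Create the GPMC, map its register block at 'base' and hook up the
 * interrupt and DMA request lines
 */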
struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
                                   hwaddr base,
                                   qemu_irq irq, qemu_irq drq)
{
    int cs;
    struct omap_gpmc_s *s = g_new0(struct omap_gpmc_s, 1);

    memory_region_init_io(&s->iomem, NULL, &omap_gpmc_ops, s, "omap-gpmc", 0x1000);
    memory_region_add_subregion(get_system_memory(), base, &s->iomem);

    s->irq = irq;
    s->drq = drq;
    s->accept_256 = cpu_is_omap3630(mpu);
    s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20;
    s->lastirq = 0;
    omap_gpmc_reset(s);

    /* We have to register a different IO memory handler for each
     * chip select region in case a NAND device is mapped there. We
     * make the region the worst-case size of 256MB and rely on the
     * container memory region in cs_map to chop it down to the actual
     * guest-requested size.
     */
    for (cs = 0; cs < 8; cs++) {
        memory_region_init_io(&s->cs_file[cs].nandiomem, NULL,
                              &omap_nand_ops,
                              &s->cs_file[cs],
                              "omap-nand",
                              256 * 1024 * 1024);
    }

    memory_region_init_io(&s->prefetch.iomem, NULL, &omap_prefetch_ops, s,
                          "omap-gpmc-prefetch", 256 * 1024 * 1024);
    return s;
}

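/* Attach a memory region (e.g. NOR flash or RAM) to chip-select cs;
 * clears the device type/size bits so the region is treated as NOR-like
 */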
void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem)
{
    struct omap_gpmc_cs_file_s *f;
    assert(iomem);

    if (cs < 0 || cs >= 8) {
        fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
        exit(-1);
    }
    f = &s->cs_file[cs];

    omap_gpmc_cs_unmap(s, cs);
    f->config[0] &= ~(0xf << 10);
    f->iomem = iomem;
    omap_gpmc_cs_map(s, cs);
}

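/* Attach a NAND device to chip-select cs; sets the device type field to
 * NAND and the bus width field to match the attached device
 */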
void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand)
{
    struct omap_gpmc_cs_file_s *f;
    assert(nand);

    if (cs < 0 || cs >= 8) {
        fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
        exit(-1);
    }
    f = &s->cs_file[cs];

    omap_gpmc_cs_unmap(s, cs);
    f->config[0] &= ~(0xf << 10);
    f->config[0] |= (OMAP_GPMC_NAND << 10);
    f->dev = nand;
    if (nand_getbuswidth(f->dev) == 16) {
        f->config[0] |= OMAP_GPMC_16BIT << 12;
    }
    omap_gpmc_cs_map(s, cs);
}