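/*
 * Size-parameterised ELF loader helpers.  This file is written against a
 * macro SZ (32 or 64) plus the matching elfhdr/elf_phdr/elf_shdr/elf_sym
 * typedefs, and is meant to be included once per ELF class by the generic
 * loader code so that glue(load_elf, SZ) expands to load_elf32/load_elf64.
 */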
static void glue(bswap_ehdr, SZ)(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswapSZs(&ehdr->e_entry);           /* Entry point virtual address */
    bswapSZs(&ehdr->e_phoff);           /* Program header table file offset */
    bswapSZs(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void glue(bswap_phdr, SZ)(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswapSZs(&phdr->p_offset);          /* Segment file offset */
    bswapSZs(&phdr->p_vaddr);           /* Segment virtual address */
    bswapSZs(&phdr->p_paddr);           /* Segment physical address */
    bswapSZs(&phdr->p_filesz);          /* Segment size in file */
    bswapSZs(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswapSZs(&phdr->p_align);           /* Segment alignment */
}

static void glue(bswap_shdr, SZ)(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswapSZs(&shdr->sh_flags);
    bswapSZs(&shdr->sh_addr);
    bswapSZs(&shdr->sh_offset);
    bswapSZs(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswapSZs(&shdr->sh_addralign);
    bswapSZs(&shdr->sh_entsize);
}

static void glue(bswap_sym, SZ)(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswapSZs(&sym->st_value);
    bswapSZs(&sym->st_size);
    bswap16s(&sym->st_shndx);
}

static void glue(bswap_rela, SZ)(struct elf_rela *rela)
{
    bswapSZs(&rela->r_offset);
    bswapSZs(&rela->r_info);
    bswapSZs((elf_word *)&rela->r_addend);
}

static struct elf_shdr *glue(find_section, SZ)(struct elf_shdr *shdr_table,
                                               int n, int type)
{
    int i;
    for (i = 0; i < n; i++) {
        if (shdr_table[i].sh_type == type) {
            return shdr_table + i;
        }
    }
    return NULL;
}
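/*
 * bsearch() comparator used by glue(lookup_symbol, SZ): the key is a guest
 * address and a symbol matches when the address falls inside its
 * [st_value, st_value + st_size) range.
 */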
static int glue(symfind, SZ)(const void *s0, const void *s1)
{
    hwaddr addr = *(hwaddr *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;
    int result = 0;
    if (addr < sym->st_value) {
        result = -1;
    } else if (addr >= sym->st_value + sym->st_size) {
        result = 1;
    }
    return result;
}

static const char *glue(lookup_symbol, SZ)(struct syminfo *s,
                                           hwaddr orig_addr)
{
    struct elf_sym *syms = glue(s->disas_symtab.elf, SZ);
    struct elf_sym *sym;

    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms),
                  glue(symfind, SZ));
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }

    return "";
}
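/* qsort() comparator: order symbols by ascending st_value. */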
static int glue(symcmp, SZ)(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        ? -1
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
}
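/*
 * Load the symbol table from the ELF image and register it for the
 * disassembler: every symbol is first reported to 'sym_cb' (if set), then
 * only function symbols are kept, sorted by address and published on the
 * global 'syminfos' list used by lookup_symbol.
 */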
static void glue(load_symbols, SZ)(struct elfhdr *ehdr, int fd, int must_swab,
                                   int clear_lsb, symbol_fn_t sym_cb)
{
    struct elf_shdr *symtab, *strtab;
    g_autofree struct elf_shdr *shdr_table = NULL;
    g_autofree struct elf_sym *syms = NULL;
    g_autofree char *str = NULL;
    struct syminfo *s;
    int nsyms, i;

    shdr_table = load_at(fd, ehdr->e_shoff,
                         sizeof(struct elf_shdr) * ehdr->e_shnum);
    if (!shdr_table) {
        return;
    }

    if (must_swab) {
        for (i = 0; i < ehdr->e_shnum; i++) {
            glue(bswap_shdr, SZ)(shdr_table + i);
        }
    }

    symtab = glue(find_section, SZ)(shdr_table, ehdr->e_shnum, SHT_SYMTAB);
    if (!symtab) {
        return;
    }
    syms = load_at(fd, symtab->sh_offset, symtab->sh_size);
    if (!syms) {
        return;
    }

    nsyms = symtab->sh_size / sizeof(struct elf_sym);

    /* String table */
    if (symtab->sh_link >= ehdr->e_shnum) {
        return;
    }
    strtab = &shdr_table[symtab->sh_link];

    str = load_at(fd, strtab->sh_offset, strtab->sh_size);
    if (!str) {
        return;
    }

    i = 0;
    while (i < nsyms) {
        if (must_swab) {
            glue(bswap_sym, SZ)(&syms[i]);
        }
        if (sym_cb) {
            sym_cb(str + syms[i].st_name, syms[i].st_info,
                   syms[i].st_value, syms[i].st_size);
        }
        /*
         * We are only interested in function symbols.
         * Throw everything else away.
         */
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            nsyms--;
            if (i < nsyms) {
                syms[i] = syms[nsyms];
            }
            continue;
        }
        if (clear_lsb) {
            /* The bottom address bit marks a Thumb or MIPS16 symbol. */
            syms[i].st_value &= ~(glue(glue(Elf, SZ), _Addr))1;
        }
        i++;
    }

    /* check we have symbols left */
    if (nsyms == 0) {
        return;
    }

    syms = g_realloc(syms, nsyms * sizeof(*syms));
    qsort(syms, nsyms, sizeof(*syms), glue(symcmp, SZ));
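    /*
     * For symbols with a zero st_size, approximate the size as the gap to
     * the next symbol in address order, so that symfind can still match
     * addresses inside them.
     */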
    for (i = 0; i < nsyms - 1; i++) {
        if (syms[i].st_size == 0) {
            syms[i].st_size = syms[i + 1].st_value - syms[i].st_value;
        }
    }

    /* Commit */
    s = g_malloc0(sizeof(*s));
    s->lookup_symbol = glue(lookup_symbol, SZ);
    glue(s->disas_symtab.elf, SZ) = g_steal_pointer(&syms);
    s->disas_num_syms = nsyms;
    s->disas_strtab = g_steal_pointer(&str);
    s->next = syminfos;
    syminfos = s;
}
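/*
 * Apply RELA relocations from the image to the in-memory copy of the
 * PT_LOAD segment described by 'ph', rewriting each relocated word through
 * 'translate_fn'.  Only R_390_RELATIVE on s390 images is handled here;
 * other s390 relocation types are reported as unsupported and images for
 * other machines are left untouched.
 */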
static int glue(elf_reloc, SZ)(struct elfhdr *ehdr, int fd, int must_swab,
                               uint64_t (*translate_fn)(void *, uint64_t),
                               void *translate_opaque, uint8_t *data,
                               struct elf_phdr *ph, int elf_machine)
{
    struct elf_shdr *reltab, *shdr_table = NULL;
    struct elf_rela *rels = NULL;
    int nrels, i, ret = -1;
    elf_word wordval;
    void *addr;

    shdr_table = load_at(fd, ehdr->e_shoff,
                         sizeof(struct elf_shdr) * ehdr->e_shnum);
    if (!shdr_table) {
        return -1;
    }
    if (must_swab) {
        for (i = 0; i < ehdr->e_shnum; i++) {
            glue(bswap_shdr, SZ)(&shdr_table[i]);
        }
    }

    reltab = glue(find_section, SZ)(shdr_table, ehdr->e_shnum, SHT_RELA);
    if (!reltab) {
        goto fail;
    }
    rels = load_at(fd, reltab->sh_offset, reltab->sh_size);
    if (!rels) {
        goto fail;
    }
    nrels = reltab->sh_size / sizeof(struct elf_rela);

    for (i = 0; i < nrels; i++) {
        if (must_swab) {
            glue(bswap_rela, SZ)(&rels[i]);
        }
        if (rels[i].r_offset < ph->p_vaddr ||
            rels[i].r_offset >= ph->p_vaddr + ph->p_filesz) {
            continue;
        }
        addr = &data[rels[i].r_offset - ph->p_vaddr];
        switch (elf_machine) {
        case EM_S390:
            switch (rels[i].r_info) {
            case R_390_RELATIVE:
                wordval = *(elf_word *)addr;
                if (must_swab) {
                    bswapSZs(&wordval);
                }
                wordval = translate_fn(translate_opaque, wordval);
                if (must_swab) {
                    bswapSZs(&wordval);
                }
                *(elf_word *)addr = wordval;
                break;
            default:
                fprintf(stderr, "Unsupported relocation type %i!\n",
                        (int)rels[i].r_info);
            }
        }
    }

    ret = 0;
fail:
    g_free(rels);
    g_free(shdr_table);
    return ret;
}
/*
 * Given 'nhdr', a pointer to a range of ELF Notes, search through them
 * for a note matching type 'elf_note_type' and return a pointer to
 * the matching ELF note.
 */
static struct elf_note *glue(get_elf_note_type, SZ)(struct elf_note *nhdr,
                                                    elf_word note_size,
                                                    elf_word phdr_align,
                                                    elf_word elf_note_type)
{
    elf_word nhdr_size = sizeof(struct elf_note);
    elf_word elf_note_entry_offset = 0;
    elf_word note_type;
    elf_word nhdr_namesz;
    elf_word nhdr_descsz;

    if (nhdr == NULL) {
        return NULL;
    }

    note_type = nhdr->n_type;
    while (note_type != elf_note_type) {
        nhdr_namesz = nhdr->n_namesz;
        nhdr_descsz = nhdr->n_descsz;

        elf_note_entry_offset = nhdr_size +
            QEMU_ALIGN_UP(nhdr_namesz, phdr_align) +
            QEMU_ALIGN_UP(nhdr_descsz, phdr_align);

        /*
         * If the offset calculated in this iteration exceeds the
         * supplied size, we are done and no matching note was found.
         */
        if (elf_note_entry_offset > note_size) {
            return NULL;
        }

        /* skip to the next ELF Note entry */
        nhdr = (void *)nhdr + elf_note_entry_offset;
        note_type = nhdr->n_type;
    }

    return nhdr;
}
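/*
 * Load an ELF image from 'fd' into the address space 'as' (or the default
 * system memory when 'as' is NULL).  PT_LOAD segments are either registered
 * as ROM blobs or written directly, PT_NOTE segments can be handed to
 * 'elf_note_fn', and the entry point and low/high load addresses are
 * returned through the out parameters.  Returns the total number of bytes
 * loaded on success, or a negative ELF_LOAD_* error code.
 *
 * A minimal calling sketch (argument values are illustrative only):
 *
 *     ssize_t size = glue(load_elf, SZ)("kernel", fd, NULL, NULL, NULL,
 *                                       0, &entry, &low, &high, NULL,
 *                                       EM_ARM, 1, 0, NULL, true, NULL);
 */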
static ssize_t glue(load_elf, SZ)(const char *name, int fd,
                                  uint64_t (*elf_note_fn)(void *, void *, bool),
                                  uint64_t (*translate_fn)(void *, uint64_t),
                                  void *translate_opaque,
                                  int must_swab, uint64_t *pentry,
                                  uint64_t *lowaddr, uint64_t *highaddr,
                                  uint32_t *pflags, int elf_machine,
                                  int clear_lsb, int data_swab,
                                  AddressSpace *as, bool load_rom,
                                  symbol_fn_t sym_cb)
{
    struct elfhdr ehdr;
    struct elf_phdr *phdr = NULL, *ph;
    int size, i;
    ssize_t total_size;
    elf_word mem_size, file_size, data_offset;
    uint64_t addr, low = (uint64_t)-1, high = 0;
    GMappedFile *mapped_file = NULL;
    uint8_t *data = NULL;
    ssize_t ret = ELF_LOAD_FAILED;

    if (read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr)) {
        goto fail;
    }
    if (must_swab) {
        glue(bswap_ehdr, SZ)(&ehdr);
    }

    if (elf_machine <= EM_NONE) {
        /* The caller didn't specify an ARCH, we can figure it out */
        elf_machine = ehdr.e_machine;
    }
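    /*
     * Sanity-check e_machine against what the caller asked for, accepting
     * a few compatible pairings (EM_PPC on EM_PPC64, EM_386 on EM_X86_64,
     * the old MicroBlaze machine number, and nanoMIPS alongside MIPS).
     */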
    switch (elf_machine) {
    case EM_PPC64:
        if (ehdr.e_machine != EM_PPC64) {
            if (ehdr.e_machine != EM_PPC) {
                ret = ELF_LOAD_WRONG_ARCH;
                goto fail;
            }
        }
        break;
    case EM_X86_64:
        if (ehdr.e_machine != EM_X86_64) {
            if (ehdr.e_machine != EM_386) {
                ret = ELF_LOAD_WRONG_ARCH;
                goto fail;
            }
        }
        break;
    case EM_MICROBLAZE:
        if (ehdr.e_machine != EM_MICROBLAZE) {
            if (ehdr.e_machine != EM_MICROBLAZE_OLD) {
                ret = ELF_LOAD_WRONG_ARCH;
                goto fail;
            }
        }
        break;
    case EM_MIPS:
    case EM_NANOMIPS:
        if ((ehdr.e_machine != EM_MIPS) &&
            (ehdr.e_machine != EM_NANOMIPS)) {
            ret = ELF_LOAD_WRONG_ARCH;
            goto fail;
        }
        break;
    default:
        if (elf_machine != ehdr.e_machine) {
            ret = ELF_LOAD_WRONG_ARCH;
            goto fail;
        }
    }

    if (pflags) {
        *pflags = ehdr.e_flags;
    }
    if (pentry) {
        *pentry = ehdr.e_entry;
    }

    glue(load_symbols, SZ)(&ehdr, fd, must_swab, clear_lsb, sym_cb);

    size = ehdr.e_phnum * sizeof(phdr[0]);
    if (lseek(fd, ehdr.e_phoff, SEEK_SET) != ehdr.e_phoff) {
        goto fail;
    }
    phdr = g_malloc0(size);
    if (!phdr) {
        goto fail;
    }
    if (read(fd, phdr, size) != size) {
        goto fail;
    }
    if (must_swab) {
        for (i = 0; i < ehdr.e_phnum; i++) {
            ph = &phdr[i];
            glue(bswap_phdr, SZ)(ph);
        }
    }
    /*
     * Since we want to be able to modify the mapped buffer, we set the
     * 'writable' parameter to 'true'. Modifications to the buffer are not
     * written back to the file.
     */
    mapped_file = g_mapped_file_new_from_fd(fd, true, NULL);
    if (!mapped_file) {
        goto fail;
    }
    total_size = 0;
    for (i = 0; i < ehdr.e_phnum; i++) {
        ph = &phdr[i];
        if (ph->p_type == PT_LOAD) {
            mem_size = ph->p_memsz;     /* Size of the ROM */
            file_size = ph->p_filesz;   /* Size of the allocated data */
            data_offset = ph->p_offset; /* Offset where the data is located */

            if (file_size > 0) {
                if (g_mapped_file_get_length(mapped_file) <
                    file_size + data_offset) {
                    goto fail;
                }

                data = (uint8_t *)g_mapped_file_get_contents(mapped_file);
                data += data_offset;
            }

            /*
             * The ELF spec is somewhat vague about the purpose of the
             * physical address field. One common use in the embedded world
             * is that the physical address field specifies the load address
             * and the virtual address field specifies the execution address.
             * Segments are packed into ROM or flash, and the relocation
             * and zero-initialization of data is done at runtime. This
             * means that the memsz header represents the runtime size of the
             * segment, but the filesz represents the load-time size. If
             * we try to honour the memsz value for an ELF file like this
             * we will end up with overlapping segments (which the
             * loader.c code will later reject).
             * We support ELF files using this scheme by checking whether
             * paddr + memsz for this segment would overlap with any other
             * segment. If so, then we assume it's using this scheme and
             * truncate the loaded segment to the filesz size.
             * If the segment considered as being memsz size doesn't overlap
             * then we use memsz for the segment length, to handle ELF files
             * which assume that the loader will do the zero-initialization.
             */
            if (mem_size > file_size) {
                /*
                 * If this segment's zero-init portion overlaps another
                 * segment's data or zero-init portion, then truncate this one.
                 * Invalid ELF files where the segments overlap even when
                 * only file_size bytes are loaded will be rejected by
                 * the ROM overlap check in loader.c, so we don't try to
                 * explicitly detect those here.
                 */
                int j;
                elf_word zero_start = ph->p_paddr + file_size;
                elf_word zero_end = ph->p_paddr + mem_size;

                for (j = 0; j < ehdr.e_phnum; j++) {
                    struct elf_phdr *jph = &phdr[j];

                    if (i != j && jph->p_type == PT_LOAD) {
                        elf_word other_start = jph->p_paddr;
                        elf_word other_end = jph->p_paddr + jph->p_memsz;

                        if (!(other_start >= zero_end ||
                              zero_start >= other_end)) {
                            mem_size = file_size;
                            break;
                        }
                    }
                }
            }

            if (mem_size > SSIZE_MAX - total_size) {
                ret = ELF_LOAD_TOO_BIG;
                goto fail;
            }
            /*
             * translate_fn is a hack for kernel images that are
             * linked at the wrong physical address.
             */
            if (translate_fn) {
                addr = translate_fn(translate_opaque, ph->p_paddr);
                glue(elf_reloc, SZ)(&ehdr, fd, must_swab, translate_fn,
                                    translate_opaque, data, ph, elf_machine);
            } else {
                addr = ph->p_paddr;
            }
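            /*
             * Optionally byte-swap the loaded data in units of
             * (1 << data_swab) bytes, typically used when the target's
             * data endianness differs from the ELF file's encoding.
             */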
            if (data_swab) {
                elf_word j;
                for (j = 0; j < file_size; j += (1 << data_swab)) {
                    uint8_t *dp = data + j;
                    switch (data_swab) {
                    case (1):
                        *(uint16_t *)dp = bswap16(*(uint16_t *)dp);
                        break;
                    case (2):
                        *(uint32_t *)dp = bswap32(*(uint32_t *)dp);
                        break;
                    case (3):
                        *(uint64_t *)dp = bswap64(*(uint64_t *)dp);
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }
            /*
             * The entry pointer in the ELF header is a virtual address;
             * if the text segment's paddr and vaddr differ, we need to
             * adjust the entry accordingly.
             */
            if (pentry && !translate_fn &&
                ph->p_vaddr != ph->p_paddr &&
                ehdr.e_entry >= ph->p_vaddr &&
                ehdr.e_entry < ph->p_vaddr + ph->p_filesz &&
                ph->p_flags & PF_X) {
                *pentry = ehdr.e_entry - ph->p_vaddr + ph->p_paddr;
            }
            /*
             * Some ELF files really do have segments of zero size;
             * just ignore them rather than trying to create empty
             * ROM blobs, because the zero-length blob can falsely
             * trigger the overlapping-ROM-blobs check.
             */
            if (mem_size != 0) {
                if (load_rom) {
                    g_autofree char *label =
                        g_strdup_printf("%s ELF program header segment %d",
                                        name, i);

                    /*
                     * rom_add_elf_program() takes its own reference to
                     * 'mapped_file'.
                     */
                    rom_add_elf_program(label, mapped_file, data, file_size,
                                        mem_size, addr, as);
                } else {
                    MemTxResult res;

                    res = address_space_write(as ? as : &address_space_memory,
                                              addr, MEMTXATTRS_UNSPECIFIED,
                                              data, file_size);
                    if (res != MEMTX_OK) {
                        goto fail;
                    }
                    /*
                     * Zero-fill the part of the segment that is not
                     * copied from the file.
                     */
                    if (file_size < mem_size) {
                        res = address_space_set(as ? as : &address_space_memory,
                                                addr + file_size, 0,
                                                mem_size - file_size,
                                                MEMTXATTRS_UNSPECIFIED);
                        if (res != MEMTX_OK) {
                            goto fail;
                        }
                    }
                }
            }

            total_size += mem_size;
            if (addr < low) {
                low = addr;
            }
            if ((addr + mem_size) > high) {
                high = addr + mem_size;
            }

            data = NULL;

        } else if (ph->p_type == PT_NOTE && elf_note_fn) {
            struct elf_note *nhdr = NULL;

            file_size = ph->p_filesz;   /* Size of the range of ELF notes */
            data_offset = ph->p_offset; /* Offset where the notes are located */

            if (file_size > 0) {
                if (g_mapped_file_get_length(mapped_file) <
                    file_size + data_offset) {
                    goto fail;
                }

                data = (uint8_t *)g_mapped_file_get_contents(mapped_file);
                data += data_offset;
            }

            /*
             * Search the ELF notes to find one with a type matching the
             * value passed in via 'translate_opaque'
             */
            nhdr = (struct elf_note *)data;
            assert(translate_opaque != NULL);
            nhdr = glue(get_elf_note_type, SZ)(nhdr, file_size, ph->p_align,
                                               *(uint64_t *)translate_opaque);
            if (nhdr != NULL) {
                elf_note_fn((void *)nhdr, (void *)&ph->p_align, SZ == 64);
            }
            data = NULL;
        }
    }

    if (lowaddr) {
        *lowaddr = low;
    }
    if (highaddr) {
        *highaddr = high;
    }
    ret = total_size;
 fail:
    if (mapped_file) {
        g_mapped_file_unref(mapped_file);
    }
    g_free(phdr);
    return ret;
}