/* This is the Linux kernel elf-loading code, ported into user space */

/* this flag is ineffective under linux too, should be deleted */
#define MAP_DENYWRITE 0

/* should probably go in elf.h */
#define ELF_PLATFORM get_elf_platform()
/* Build the AT_PLATFORM string "iN86" by patching the CPUID family digit
   reported by the emulated CPU into a static buffer.  */
static const char *get_elf_platform(void)
    static char elf_platform[] = "i386";
    int family = (global_env->cpuid_version >> 8) & 0xff;
    /* overwrite the '3' of "i386" with the family digit, e.g. "i586" */
    elf_platform[1] = '0' + family;

/* AT_HWCAP: pass the emulated CPUID feature bits straight to the target.  */
#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
    return global_env->cpuid_features;
#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_386
/* Set the initial i386 user register state for a freshly loaded image:
   stack pointer and entry point from the image info.  */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a mean for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells we have no such handler. */

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA ELFDATA2MSB
/* NOTE(review): the #else/#endif around this ELF_DATA pair appear to be
   missing from this excerpt */
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_ARM
/* Set the initial ARM user register state: user-mode CPSR, Thumb bit taken
   from the low bit of the entry point, and startup values read off the
   target stack.  */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    target_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;              /* user mode */
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;       /* odd entry address => Thumb code */
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    regs->ARM_r2 = tgetl(stack + 8); /* envp */
    regs->ARM_r1 = tgetl(stack + 4); /* argv (the old "envp" comment here was wrong) */
    /* XXX: it seems that r0 is zeroed after ! */
    /* For uClinux PIC binaries.  */
    regs->ARM_r10 = infop->start_data;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

/* ARM hardware-capability bits reported to the target via AT_HWCAP */
    ARM_HWCAP_ARM_SWP = 1 << 0,
    ARM_HWCAP_ARM_HALF = 1 << 1,
    ARM_HWCAP_ARM_THUMB = 1 << 2,
    ARM_HWCAP_ARM_26BIT = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA = 1 << 5,
    ARM_HWCAP_ARM_VFP = 1 << 6,
    ARM_HWCAP_ARM_EDSP = 1 << 7,

/* capabilities we advertise (note: 26BIT and EDSP are deliberately absent) */
#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_SPARC

/* Initial SPARC64 state: pc at the entry point, npc at the following
   instruction, stack pointer (u_regs[14], presumably %o6/%sp) placed below
   a 16-slot save area.  */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    /* NOTE(review): 16 * 4 reserves 32-bit-sized slots even on the 64-bit
       target -- confirm this is intentional */
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_SPARC

/* Initial SPARC state: pc/npc at the entry point, stack pointer
   (u_regs[14], presumably %o6/%sp) below a 16-word register save area.  */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA ELFDATA2MSB
/* NOTE(review): the #else/#endif around this ELF_DATA pair appear to be
   missing from this excerpt */
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_PPC

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC 22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS 5
#define ARCH_DLINFO \
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
        /* Now handle glibc compatibility. */ \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
/* Initial PPC user state: user-mode MSR, stack pointer in r1, entry point
   in nip, plus startup words read off the target stack (BSD-style).  */
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
    target_ulong pos = infop->start_stack;

    _regs->msr = 1 << MSR_PR; /* Set user mode */
    _regs->gpr[1] = infop->start_stack;
    _regs->nip = infop->entry;
    /* Note that isn't exactly what regular kernel does
     * but this is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.  */
    _regs->gpr[3] = tgetl(pos); /* first stack word -- presumably argc */
    pos += sizeof(target_ulong);
    /* NOTE(review): loop body not visible in this excerpt; appears to
       advance pos past the argv vector */
    for (tmp = 1; tmp != 0; pos += sizeof(target_ulong))

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#define ELF_CLASS ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA ELFDATA2MSB
/* NOTE(review): the #else/#endif around this ELF_DATA pair appear to be
   missing from this excerpt */
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_MIPS

/* Initial MIPS user state: user mode, entry point in EPC, stack pointer
   in register 29 ($sp).  */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->cp0_status = CP0St_UM;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
#endif /* TARGET_MIPS */
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_SH

/* Initial SH4 user state: entry point in pc, stack pointer in r15.  */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack - 16 * 4;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

/* default for targets that provide no AT_PLATFORM string */
#define ELF_PLATFORM (NULL)
    /* a.out executable header fields (used for a.out-format interpreters) */
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */

/* extract the a.out magic number from the a_info word */
#define N_MAGIC(exec) ((exec).a_info & 0xffff)

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
/* from personality.h */

/* Flags for bug emulation.  These occupy the top three bytes. */
#define STICKY_TIMEOUTS 0x4000000
#define WHOLE_SECONDS 0x2000000

/* Personality types.  These go in the low byte.  Avoid using the top bit,
 * it will conflict with error returns.
 */
#define PER_MASK (0x00ff)
#define PER_LINUX (0x0000)
#define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
#define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
#define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
#define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
#define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
#define PER_BSD (0x0006)
#define PER_XENIX (0x0007 | STICKY_TIMEOUTS)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
/* round an address down to the start of its target ELF page */
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
/* offset of an address within its target ELF page */
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))

/* kind of program interpreter requested by the binary, if any */
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

/* number of (id, value) pairs in the base auxiliary vector */
#define DLINFO_ITEMS 12
/* kernel-source compatibility wrapper (body not visible in this excerpt) */
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)

/* size of the emulated process stack, defined elsewhere */
extern unsigned long x86_stack_size;

static int load_aout_interp(void * exptr, int interp_fd);
/* Byte-swap every multi-byte field of an ELF file header in place.  */
static void bswap_ehdr(struct elfhdr *ehdr)
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
/* Byte-swap every field of an ELF program header in place.  */
static void bswap_phdr(struct elf_phdr *phdr)
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
/* Byte-swap every field of an ELF section header in place.  */
static void bswap_shdr(struct elf_shdr *shdr)
    bswap32s(&shdr->sh_name);           /* Section name (string table index) */
    bswap32s(&shdr->sh_type);           /* Section type */
    bswaptls(&shdr->sh_flags);          /* Section flags */
    bswaptls(&shdr->sh_addr);           /* Section virtual address */
    bswaptls(&shdr->sh_offset);         /* Section file offset */
    bswaptls(&shdr->sh_size);           /* Section size in bytes */
    bswap32s(&shdr->sh_link);           /* Link to another section */
    bswap32s(&shdr->sh_info);           /* Additional section information */
    bswaptls(&shdr->sh_addralign);      /* Section alignment */
    bswaptls(&shdr->sh_entsize);        /* Entry size if section holds a table */
/* Byte-swap the multi-byte fields of a 32-bit ELF symbol in place
   (st_info/st_other are single bytes and need no swap).  */
static void bswap_sym(Elf32_Sym *sym)
    bswap32s(&sym->st_name);
    bswap32s(&sym->st_value);
    bswap32s(&sym->st_size);
    bswap16s(&sym->st_shndx);
/*
 * 'copy_elf_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static unsigned long copy_elf_strings(int argc,char ** argv, void **page,
    char *tmp, *tmp1, *pag = NULL;

        return 0; /* bullet-proofing */
        fprintf(stderr, "VFS: argc is wrong");
        if (p < len) { /* this shouldn't happen - 128kB */
        /* strings are copied top-down: find the page and offset for the
           current position, allocating pages lazily as needed */
        offset = p % TARGET_PAGE_SIZE;
        pag = (char *)page[p/TARGET_PAGE_SIZE];
            pag = (char *)malloc(TARGET_PAGE_SIZE);
            page[p/TARGET_PAGE_SIZE] = pag;
        if (len == 0 || offset == 0) {
            /* single byte at a page boundary */
            *(pag + offset) = *tmp;
            /* bulk-copy as much of the string as fits in this page,
               walking backwards through the source */
            int bytes_to_copy = (len > offset) ? offset : len;
            tmp -= bytes_to_copy;
            offset -= bytes_to_copy;
            len -= bytes_to_copy;
            memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
/* Map the target stack (with a guard page on top) and copy the prepared
   argument/environment pages into its upper end.  */
unsigned long setup_arg_pages(target_ulong p, struct linux_binprm * bprm,
                              struct image_info * info)
    target_ulong stack_base, size, error;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,

    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;

    /* copy each prepared argument page into the mapped stack */
    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
        stack_base += TARGET_PAGE_SIZE;
/* Map zero-filled anonymous memory over [start, end) for the bss/brk area.  */
static void set_brk(unsigned long start, unsigned long end)
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);

    if(target_mmap(start, end - start,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(unsigned long elf_bss)
    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        unsigned long end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            /* map the gap between the real-page-rounded bss end and the
               target-page-rounded end with anonymous zero pages */
            mmap((void *)end_addr1, end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

    /* zero the fractional tail of the last page of the data segment */
    nbyte = elf_bss & (qemu_host_page_size-1);
        nbyte = qemu_host_page_size - nbyte;
/* Build the initial target stack image: platform string, auxiliary vector
   (auxv), and the argc/argv/envp pointer arrays.  Returns the new stack
   pointer.  */
static unsigned long create_elf_tables(target_ulong p, int argc, int envc,
                                       struct elfhdr * exec,
                                       unsigned long load_addr,
                                       unsigned long load_bias,
                                       unsigned long interp_load_addr, int ibcs,
                                       struct image_info *info)
    target_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(target_ulong);

    k_platform = ELF_PLATFORM;
        /* copy the platform string onto the stack, keeping word alignment */
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        memcpy_to_target(sp, k_platform, len);
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (target_ulong)15;
    /* count the target_ulong slots that will be pushed below so the final
       sp alignment can be computed up front */
    size = (DLINFO_ITEMS + 1) * 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    sp -= 16 - (size & 15);

/* push one (id, value) auxv pair; entries appear in reverse push order */
#define NEW_AUX_ENT(id, val) do { \
        sp -= n; tputl(sp, val); \
        sp -= n; tputl(sp, id); \
    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (target_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (target_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (target_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (target_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (target_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (target_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (target_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (target_ulong) ELF_HWCAP);
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
/* Map an ELF program interpreter (dynamic linker) into the target address
   space.  Stores its load address in *interp_load_addr and returns its
   relocated entry point (~0UL is the callers' error convention).  */
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
                                     unsigned long *interp_load_addr)
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    unsigned long load_addr = 0;
    int load_addr_set = 0;
    unsigned long last_bss, elf_bss;

    bswap_ehdr(interp_elf_ex);

    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {

    /* Now read in all of the header information */

    /* refuse program header tables larger than one target page */
    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
        retval = read(interpreter_fd,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
        perror("load_elf_interp");

    /* byte-swap all program headers if needed */
    for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid harcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,

    /* map each PT_LOAD segment of the interpreter */
    for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            unsigned long vaddr = 0;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

                close(interpreter_fd);

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     */
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

    *interp_load_addr = load_addr;
    return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
    struct elf_shdr sechdr, symtab, strtab;

    /* scan the section header table for a SHT_SYMTAB section */
    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
        if (sechdr.sh_type == SHT_SYMTAB) {
            /* sh_link of the symtab names the associated string table */
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))

    return; /* Shouldn't happen... */

    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    s->disas_symtab = malloc(symtab.sh_size);
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_symtab || !s->disas_strtab)

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)

    /* byte-swap each symbol entry in place */
    for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++)
        bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
    s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
/* Load an ELF executable into the target address space: validate the
   headers, set up the argument/environment pages and stack, map every
   PT_LOAD segment, load the program interpreter (ELF or a.out) if one is
   requested, build the auxv/argv/envp tables, and fill in *info for the
   caller.  */
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    unsigned long load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    unsigned long mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    unsigned long elf_bss, k, elf_brk;
    char * elf_interpreter;
    unsigned long elf_entry, interp_load_addr = 0;
    unsigned long start_code, end_code, end_data;
    unsigned long elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;

    elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {

    /* copy filename, environment and arguments into the argument pages */
    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
    bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
        perror("load_elf_binary");

    /* byte-swap the program headers if needed */
    elf_ppnt = elf_phdata;
    for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);

    elf_ppnt = elf_phdata;

    elf_interpreter = NULL;

    /* look for a PT_INTERP segment naming the program interpreter */
    for(i=0;i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if ( elf_interpreter != NULL )
                free(elf_interpreter);

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
                perror("load_elf_binary2");

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;

            printf("Using ELF interpreter %s\n", elf_interpreter);
            retval = open(path(elf_interpreter), O_RDONLY);
                interpreter_fd = retval;
                perror(elf_interpreter);
                /* retval = -errno; */

            /* read the interpreter's own header for format detection */
            retval = lseek(interpreter_fd, 0, SEEK_SET);
                retval = read(interpreter_fd,bprm->buf,128);
                interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
                interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
                perror("load_elf_binary3");
                free(elf_interpreter);

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter){
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;

        if (!interpreter_type) {
            free(elf_interpreter);

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    /* an a.out interpreter is passed the executable's fd as a string */
    if (interpreter_type == INTERPRETER_AOUT) {
        snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
        passed_p = passed_fileno;

        if (elf_interpreter) {
            bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
        if (elf_interpreter) {
            free(elf_interpreter);

    /* OK, This is the point of no return */

    info->start_mmap = (unsigned long)ELF_START_MMAP;

    elf_entry = (unsigned long) elf_ex.e_entry;

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     */
    for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        unsigned long error;

        if (elf_ppnt->p_type != PT_LOAD)

        /* translate segment flags into mmap protections */
        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without harcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);

        if (!load_addr_set) {
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;

        /* track code/data/bss extents across all loaded segments */
        k = elf_ppnt->p_vaddr;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;

    /* relocate all computed addresses by the load bias */
    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    // start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~0UL) {
            printf("Unable to load interpreter\n");

    load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
    bprm->p = create_elf_tables(bprm->p,
                                load_addr, load_bias,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = end_code;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss and break
     */
    set_brk(elf_bss, elf_brk);

    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);

    if ( info->personality == PER_SVR4 )
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);

    info->entry = elf_entry;
/* Stub: loading an a.out-format interpreter is not implemented.  */
static int load_aout_interp(void * exptr, int interp_fd)
    printf("a.out interpreter not yet supported\n");
/* Public wrapper around the per-architecture init_thread() above.  */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
    init_thread(regs, infop);