1 /* This is the Linux kernel elf-loading code, ported into user space */
16 /* this flag is ineffective under Linux too; it should be deleted */
18 #define MAP_DENYWRITE 0
21 /* should probably go in elf.h */
28 #define ELF_PLATFORM get_elf_platform()
/* Build the AT_PLATFORM string for the emulated x86 CPU.
   NOTE(review): intermediate lines are elided in this listing; presumably
   `family` is range-checked before the digit is patched — confirm. */
30 static const char *get_elf_platform(void)
32 static char elf_platform[] = "i386";
33 int family = (global_env->cpuid_version >> 8) & 0xff;
/* Patch the family digit in place: "i386" -> "i486"/"i586"/... */
37 elf_platform[1] = '0' + family;
41 #define ELF_HWCAP get_elf_hwcap()
/* AT_HWCAP value: the raw CPUID feature bits of the emulated CPU. */
43 static uint32_t get_elf_hwcap(void)
45 return global_env->cpuid_features;
48 #define ELF_START_MMAP 0x80000000
51 * This is used to ensure we don't load something for the wrong architecture.
53 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
56 * These are used to set parameters in the core dumps.
58 #define ELF_CLASS ELFCLASS32
59 #define ELF_DATA ELFDATA2LSB
60 #define ELF_ARCH EM_386
62 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
63 starts %edx contains a pointer to a function which might be
64 registered using `atexit'. This provides a means for the
65 dynamic linker to call DT_FINI functions for shared libraries
66 that have been loaded before the code runs.
68 A value of 0 tells we have no such handler. */
69 #define ELF_PLAT_INIT(_r) _r->edx = 0
/* Set the initial x86 user-register state for a freshly loaded image:
   stack pointer at the top of the argument stack, eip at the ELF entry. */
71 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
73 regs->esp = infop->start_stack;
74 regs->eip = infop->entry;
77 #define USE_ELF_CORE_DUMP
78 #define ELF_EXEC_PAGESIZE 4096
84 #define ELF_START_MMAP 0x80000000
86 #define elf_check_arch(x) ( (x) == EM_ARM )
88 #define ELF_CLASS ELFCLASS32
89 #ifdef TARGET_WORDS_BIGENDIAN
90 #define ELF_DATA ELFDATA2MSB
92 #define ELF_DATA ELFDATA2LSB
94 #define ELF_ARCH EM_ARM
96 #define ELF_PLAT_INIT(_r) _r->ARM_r0 = 0
/* Initial ARM user-register state: all registers zeroed, CPSR in user
   mode, pc at the ELF entry, sp at the top of the argument stack, and
   r1/r2 loaded from the argv/envp slots laid out on that stack. */
98 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
100 target_long *stack = (void *)infop->start_stack;
101 memset(regs, 0, sizeof(*regs));
102 regs->ARM_cpsr = 0x10; /* 0x10 = USR mode */
103 regs->ARM_pc = infop->entry;
104 regs->ARM_sp = infop->start_stack;
105 regs->ARM_r2 = tswapl(stack[2]); /* envp */
106 regs->ARM_r1 = tswapl(stack[1]); /* argv */
107 /* XXX: it seems that r0 is zeroed after ! */
108 // regs->ARM_r0 = tswapl(stack[0]); /* argc */
111 #define USE_ELF_CORE_DUMP
112 #define ELF_EXEC_PAGESIZE 4096
116 ARM_HWCAP_ARM_SWP = 1 << 0,
117 ARM_HWCAP_ARM_HALF = 1 << 1,
118 ARM_HWCAP_ARM_THUMB = 1 << 2,
119 ARM_HWCAP_ARM_26BIT = 1 << 3,
120 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
121 ARM_HWCAP_ARM_FPA = 1 << 5,
122 ARM_HWCAP_ARM_VFP = 1 << 6,
123 ARM_HWCAP_ARM_EDSP = 1 << 7,
126 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
127 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
128 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
133 #ifdef TARGET_SPARC64
135 #define ELF_START_MMAP 0x80000000
137 #define elf_check_arch(x) ( (x) == EM_SPARC )
139 #define ELF_CLASS ELFCLASS64
140 #define ELF_DATA ELFDATA2MSB
141 #define ELF_ARCH EM_SPARC
144 #define ELF_PLAT_INIT(_r)
/* Initial SPARC64 user-register state: pc/npc at the entry point and the
   stack pointer (%o6 == u_regs[14]) biased below start_stack to leave a
   register window save area. */
146 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
149 regs->pc = infop->entry;
150 regs->npc = regs->pc + 4;
152 regs->u_regs[14] = infop->start_stack - 16 * 4;
156 #define ELF_START_MMAP 0x80000000
158 #define elf_check_arch(x) ( (x) == EM_SPARC )
160 #define ELF_CLASS ELFCLASS32
161 #define ELF_DATA ELFDATA2MSB
162 #define ELF_ARCH EM_SPARC
165 #define ELF_PLAT_INIT(_r)
/* Initial SPARC (32-bit) user-register state: same layout as the
   SPARC64 variant above — pc/npc at entry, %o6 biased below start_stack
   for the register window save area. */
167 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
170 regs->pc = infop->entry;
171 regs->npc = regs->pc + 4;
173 regs->u_regs[14] = infop->start_stack - 16 * 4;
181 #define ELF_START_MMAP 0x80000000
183 #define elf_check_arch(x) ( (x) == EM_PPC )
185 #define ELF_CLASS ELFCLASS32
186 #ifdef TARGET_WORDS_BIGENDIAN
187 #define ELF_DATA ELFDATA2MSB
189 #define ELF_DATA ELFDATA2LSB
191 #define ELF_ARCH EM_PPC
193 /* Note that this isn't exactly what the regular kernel does
194 * but this is what the ABI wants and is needed to allow
195 * execution of PPC BSD programs.
197 #define ELF_PLAT_INIT(_r) \
199 target_ulong *pos = (target_ulong *)bprm->p, tmp = 1; \
200 _r->gpr[3] = bprm->argc; \
201 _r->gpr[4] = (unsigned long)++pos; \
202 for (; tmp != 0; pos++) \
204 _r->gpr[5] = (unsigned long)pos; \
208 * We need to put in some extra aux table entries to tell glibc what
209 * the cache block size is, so it can use the dcbz instruction safely.
211 #define AT_DCACHEBSIZE 19
212 #define AT_ICACHEBSIZE 20
213 #define AT_UCACHEBSIZE 21
214 /* A special ignored type value for PPC, for glibc compatibility. */
215 #define AT_IGNOREPPC 22
217 * The requirements here are:
218 * - keep the final alignment of sp (sp & 0xf)
219 * - make sure the 32-bit value at the first 16 byte aligned position of
220 * AUXV is greater than 16 for glibc compatibility.
221 * AT_IGNOREPPC is used for that.
222 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
223 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
225 #define DLINFO_ARCH_ITEMS 5
226 #define ARCH_DLINFO \
228 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
229 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
230 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
232 * Now handle glibc compatibility. \
234 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
235 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
/* Initial PPC user-register state: MSR problem-state (user) bit set,
   r1 = stack pointer, nip = ELF entry point. */
238 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
240 _regs->msr = 1 << MSR_PR; /* Set user mode */
241 _regs->gpr[1] = infop->start_stack;
242 _regs->nip = infop->entry;
245 #define USE_ELF_CORE_DUMP
246 #define ELF_EXEC_PAGESIZE 4096
251 #define ELF_PLATFORM (NULL)
261 * MAX_ARG_PAGES defines the number of pages allocated for arguments
262 and environment for the new program. 32 should suffice, this gives
263 * a maximum env+arg of 128kB w/4KB pages!
265 #define MAX_ARG_PAGES 32
268 * This structure is used to hold the arguments that are
269 * used when loading binaries.
/* Per-exec parameter block, modelled on the kernel's struct linux_binprm.
   NOTE(review): some members are elided from this listing. */
271 struct linux_binprm {
273 unsigned long page[MAX_ARG_PAGES]; /* arg/env pages filled by copy_strings() */
279 char * filename; /* Name of binary */
280 unsigned long loader, exec;
281 int dont_iput; /* binfmt handler has put inode */
286 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
287 unsigned int a_text; /* length of text, in bytes */
288 unsigned int a_data; /* length of data, in bytes */
289 unsigned int a_bss; /* length of uninitialized data area, in bytes */
290 unsigned int a_syms; /* length of symbol table data in file, in bytes */
291 unsigned int a_entry; /* start address */
292 unsigned int a_trsize; /* length of relocation info for text, in bytes */
293 unsigned int a_drsize; /* length of relocation info for data, in bytes */
297 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
303 /* max code+data+bss space allocated to elf interpreter */
304 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
306 /* max code+data+bss+brk space allocated to ET_DYN executables */
307 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
309 /* from personality.h */
311 /* Flags for bug emulation. These occupy the top three bytes. */
312 #define STICKY_TIMEOUTS 0x4000000
313 #define WHOLE_SECONDS 0x2000000
315 /* Personality types. These go in the low byte. Avoid using the top bit,
316 * it will conflict with error returns.
318 #define PER_MASK (0x00ff)
319 #define PER_LINUX (0x0000)
320 #define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
321 #define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
322 #define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
323 #define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
324 #define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
325 #define PER_BSD (0x0006)
326 #define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
328 /* Necessary parameters */
331 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
332 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
333 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
335 #define INTERPRETER_NONE 0
336 #define INTERPRETER_AOUT 1
337 #define INTERPRETER_ELF 2
339 #define DLINFO_ITEMS 12
/* User-space stand-in for the kernel's memcpy_fromfs()
   (body elided in this listing — presumably a plain memcpy). */
341 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
346 extern unsigned long x86_stack_size;
348 static int load_aout_interp(void * exptr, int interp_fd);
/* Byte-swap every multi-byte field of the ELF file header in place
   (needed when host and target endianness differ). */
351 static void bswap_ehdr(struct elfhdr *ehdr)
353 bswap16s(&ehdr->e_type); /* Object file type */
354 bswap16s(&ehdr->e_machine); /* Architecture */
355 bswap32s(&ehdr->e_version); /* Object file version */
356 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
357 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
358 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
359 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
360 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
361 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
362 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
363 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
364 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
365 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
/* Byte-swap every field of an ELF program header in place. */
368 static void bswap_phdr(struct elf_phdr *phdr)
370 bswap32s(&phdr->p_type); /* Segment type */
371 bswaptls(&phdr->p_offset); /* Segment file offset */
372 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
373 bswaptls(&phdr->p_paddr); /* Segment physical address */
374 bswaptls(&phdr->p_filesz); /* Segment size in file */
375 bswaptls(&phdr->p_memsz); /* Segment size in memory */
376 bswap32s(&phdr->p_flags); /* Segment flags */
377 bswaptls(&phdr->p_align); /* Segment alignment */
/* Byte-swap every field of an ELF section header in place. */
380 static void bswap_shdr(struct elf_shdr *shdr)
382 bswap32s(&shdr->sh_name);
383 bswap32s(&shdr->sh_type);
384 bswaptls(&shdr->sh_flags);
385 bswaptls(&shdr->sh_addr);
386 bswaptls(&shdr->sh_offset);
387 bswaptls(&shdr->sh_size);
388 bswap32s(&shdr->sh_link);
389 bswap32s(&shdr->sh_info);
390 bswaptls(&shdr->sh_addralign);
391 bswaptls(&shdr->sh_entsize);
/* Byte-swap the multi-byte fields of a 32-bit ELF symbol entry in place. */
394 static void bswap_sym(Elf32_Sym *sym)
396 bswap32s(&sym->st_name);
397 bswap32s(&sym->st_value);
398 bswap32s(&sym->st_size);
399 bswap16s(&sym->st_shndx);
/* Allocate one anonymous, zeroed host page in the target address space.
   NOTE(review): the tail of the error path is elided in this listing. */
403 static void * get_free_page(void)
407 /* User-space version of kernel get_free_page. Returns a page-aligned
408 * page-sized chunk of memory.
410 retval = (void *)target_mmap(0, qemu_host_page_size, PROT_READ|PROT_WRITE,
411 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
413 if((long)retval == -1) {
414 perror("get_free_page");
/* Release a page previously obtained from get_free_page(). */
422 static void free_page(void * pageaddr)
424 target_munmap((unsigned long)pageaddr, qemu_host_page_size);
428 * 'copy_string()' copies argument/envelope strings from user
429 * memory to free pages in kernel mem. These are in a format ready
430 * to be put directly into the top of new user memory.
/* Copy `argc` strings from `argv` into the bprm page array, filling
   the pages downward from offset p; returns the new (lower) p.
   Pages are allocated lazily via get_free_page().
   NOTE(review): several lines (loop framing, error exits) are elided. */
433 static unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
436 char *tmp, *tmp1, *pag = NULL;
440 return 0; /* bullet-proofing */
445 fprintf(stderr, "VFS: argc is wrong");
451 if (p < len) { /* this shouldn't happen - 128kB */
457 offset = p % TARGET_PAGE_SIZE;
458 pag = (char *) page[p/TARGET_PAGE_SIZE];
460 pag = (char *)get_free_page();
461 page[p/TARGET_PAGE_SIZE] = (unsigned long)pag;
/* Byte-at-a-time path at a page boundary or for the terminating NUL. */
466 if (len == 0 || offset == 0) {
467 *(pag + offset) = *tmp;
/* Bulk path: copy as much of the string as fits in this page. */
470 int bytes_to_copy = (len > offset) ? offset : len;
471 tmp -= bytes_to_copy;
473 offset -= bytes_to_copy;
474 len -= bytes_to_copy;
475 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
/* Check the process's supplementary group list for gid g. */
482 static int in_group_p(gid_t g)
484 /* return TRUE if we're in the specified group, FALSE otherwise */
487 gid_t grouplist[NGROUPS];
489 ngroup = getgroups(NGROUPS, grouplist);
490 for(i = 0; i < ngroup; i++) {
491 if(grouplist[i] == g) {
/* Count the entries of a NULL-terminated string vector (argv/envp). */
498 static int count(char ** vec)
502 for(i = 0; *vec; i++) {
/* Validate the opened binary (regular file, executable) and fill in the
   bprm credentials, honouring set-uid/set-gid bits, then read the first
   128 bytes of the file into bprm->buf for format sniffing.
   NOTE(review): several lines (error returns, id_change use) are elided. */
509 static int prepare_binprm(struct linux_binprm *bprm)
513 int retval, id_change;
515 if(fstat(bprm->fd, &st) < 0) {
520 if(!S_ISREG(mode)) {	/* Must be regular file */
523 if(!(mode & 0111)) {	/* Must have at least one execute bit set */
527 bprm->e_uid = geteuid();
528 bprm->e_gid = getegid();
/* Set-uid binary: the effective uid becomes the file owner's. */
533 bprm->e_uid = st.st_uid;
534 if(bprm->e_uid != geteuid()) {
541 * If setgid is set but no group execute bit then this
542 * is a candidate for mandatory locking, not a setgid
545 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
546 bprm->e_gid = st.st_gid;
547 if (!in_group_p(bprm->e_gid)) {
552 memset(bprm->buf, 0, sizeof(bprm->buf));
553 retval = lseek(bprm->fd, 0L, SEEK_SET);
555 retval = read(bprm->fd, bprm->buf, 128);
558 perror("prepare_binprm");
560 /* return(-errno); */
/* Map the user stack (with one PROT_NONE guard page above it) and copy
   the collected arg/env pages to their final location at the top of the
   stack; returns the adjusted offset p.
   NOTE(review): error handling around target_mmap and the final return
   are elided in this listing. */
567 unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
568 struct image_info * info)
570 unsigned long stack_base, size, error;
573 /* Create enough stack to hold everything. If we don't use
574 * it for args, we'll use it for something else...
576 size = x86_stack_size;
577 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
578 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
579 error = target_mmap(0,
580 size + qemu_host_page_size,
581 PROT_READ | PROT_WRITE,
582 MAP_PRIVATE | MAP_ANONYMOUS,
588 /* we reserve one extra page at the top of the stack as guard */
589 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
591 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
/* Relocate the loader/exec offsets now that the stack base is known. */
595 bprm->loader += stack_base;
597 bprm->exec += stack_base;
599 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
603 memcpy((void *)stack_base, (void *)bprm->page[i], TARGET_PAGE_SIZE);
604 free_page((void *)bprm->page[i]);
606 stack_base += TARGET_PAGE_SIZE;
/* Map anonymous zeroed memory over [start, end) — used to create the
   bss/brk region after the loadable segments are in place. */
611 static void set_brk(unsigned long start, unsigned long end)
613 /* page-align the start and end addresses... */
614 start = HOST_PAGE_ALIGN(start);
615 end = HOST_PAGE_ALIGN(end);
618 if(target_mmap(start, end - start,
619 PROT_READ | PROT_WRITE | PROT_EXEC,
620 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
621 perror("cannot mmap brk");
627 /* We need to explicitly zero any fractional pages after the data
628 section (i.e. bss). This would contain the junk from the file that
629 should not be in memory. */
/* Zero the tail of the last data page (the start of bss), remapping any
   pages the file mapping may have missed when the real host page size is
   smaller than the target's.
   NOTE(review): the byte-zeroing loop after line 655 is elided. */
630 static void padzero(unsigned long elf_bss)
635 /* XXX: this is really a hack : if the real host page size is
636 smaller than the target page size, some pages after the end
637 of the file may not be mapped. A better fix would be to
638 patch target_mmap(), but it is more complicated as the file
639 size must be known */
640 if (qemu_real_host_page_size < qemu_host_page_size) {
641 unsigned long end_addr, end_addr1;
642 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
643 ~(qemu_real_host_page_size - 1);
644 end_addr = HOST_PAGE_ALIGN(elf_bss);
645 if (end_addr1 < end_addr) {
646 mmap((void *)end_addr1, end_addr - end_addr1,
647 PROT_READ|PROT_WRITE|PROT_EXEC,
648 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
/* Zero the fraction of the final page that lies past elf_bss. */
652 nbyte = elf_bss & (qemu_host_page_size-1);
654 nbyte = qemu_host_page_size - nbyte;
655 fpnt = (char *) elf_bss;
/* Lay out the initial user stack for the new image: the AT_PLATFORM
   string, the ELF auxiliary vector (AT_* entries), the envp and argv
   pointer arrays and argc; records arg/env boundaries in *info and
   returns the final stack pointer.
   NOTE(review): many lines (csp setup, argv/envp copy loops, the final
   return) are elided in this listing. */
662 static unsigned int * create_elf_tables(char *p, int argc, int envc,
663 struct elfhdr * exec,
664 unsigned long load_addr,
665 unsigned long load_bias,
666 unsigned long interp_load_addr, int ibcs,
667 struct image_info *info)
669 target_ulong *argv, *envp;
670 target_ulong *sp, *csp;
671 target_ulong *u_platform;
672 const char *k_platform;
676 * Force 16 byte _final_ alignment here for generality.
678 sp = (unsigned int *) (~15UL & (unsigned long) p);
680 k_platform = ELF_PLATFORM;
/* Copy the platform string onto the stack, keeping sp word-aligned. */
682 size_t len = strlen(k_platform) + 1;
683 sp -= (len + sizeof(target_ulong) - 1) / sizeof(target_ulong);
684 u_platform = (target_ulong *)sp;
685 __copy_to_user(u_platform, k_platform, len);
/* Pre-compute where the tables will end so alignment can be fixed up. */
688 csp -= (DLINFO_ITEMS + 1) * 2;
691 #ifdef DLINFO_ARCH_ITEMS
692 csp -= DLINFO_ARCH_ITEMS*2;
696 csp -= (!ibcs ? 3 : 1); /* argc itself */
697 if ((unsigned long)csp & 15UL)
698 sp -= ((unsigned long)csp & 15UL) / sizeof(*sp);
700 #define NEW_AUX_ENT(id, val) \
703 put_user (val, sp + 1)
704 NEW_AUX_ENT (AT_NULL, 0);
706 /* There must be exactly DLINFO_ITEMS entries here. */
707 NEW_AUX_ENT(AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
708 NEW_AUX_ENT(AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
709 NEW_AUX_ENT(AT_PHNUM, (target_ulong)(exec->e_phnum));
710 NEW_AUX_ENT(AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
711 NEW_AUX_ENT(AT_BASE, (target_ulong)(interp_load_addr));
712 NEW_AUX_ENT(AT_FLAGS, (target_ulong)0);
713 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
714 NEW_AUX_ENT(AT_UID, (target_ulong) getuid());
715 NEW_AUX_ENT(AT_EUID, (target_ulong) geteuid());
716 NEW_AUX_ENT(AT_GID, (target_ulong) getgid());
717 NEW_AUX_ENT(AT_EGID, (target_ulong) getegid());
718 NEW_AUX_ENT(AT_HWCAP, (target_ulong) ELF_HWCAP);
720 NEW_AUX_ENT(AT_PLATFORM, (target_ulong) u_platform);
723 * ARCH_DLINFO must come last so platform specific code can enforce
724 * special alignment requirements on the AUXV if necessary (eg. PPC).
/* Push the envp/argv array addresses, then fill the arrays themselves. */
735 put_user((target_ulong)envp,--sp);
736 put_user((target_ulong)argv,--sp);
739 info->arg_start = (unsigned int)((unsigned long)p & 0xffffffff);
741 put_user((target_ulong)p,argv++);
748 info->arg_end = info->env_start = (unsigned int)((unsigned long)p & 0xffffffff);
750 put_user((target_ulong)p,envp++);
757 info->env_end = (unsigned int)((unsigned long)p & 0xffffffff);
/* Map the ELF interpreter (dynamic linker) into the target address
   space, zero-fill its bss, and return its relocated entry point.
   NOTE(review): error paths (the "bad interpreter" returns) and parts
   of the mapping loop are elided in this listing. */
763 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
765 unsigned long *interp_load_addr)
767 struct elf_phdr *elf_phdata = NULL;
768 struct elf_phdr *eppnt;
769 unsigned long load_addr = 0;
770 int load_addr_set = 0;
772 unsigned long last_bss, elf_bss;
781 bswap_ehdr(interp_elf_ex);
783 /* First of all, some simple consistency checks */
784 if ((interp_elf_ex->e_type != ET_EXEC &&
785 interp_elf_ex->e_type != ET_DYN) ||
786 !elf_check_arch(interp_elf_ex->e_machine)) {
791 /* Now read in all of the header information */
793 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
796 elf_phdata = (struct elf_phdr *)
797 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
803 * If the size of this structure has changed, then punt, since
804 * we will be doing the wrong thing.
806 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
811 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
813 retval = read(interpreter_fd,
815 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
818 perror("load_elf_interp");
825 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
830 if (interp_elf_ex->e_type == ET_DYN) {
831 /* in order to avoid hardcoding the interpreter load
832 address in qemu, we allocate a big enough memory zone */
833 error = target_mmap(0, INTERP_MAP_SIZE,
834 PROT_NONE, MAP_PRIVATE | MAP_ANON,
845 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
846 if (eppnt->p_type == PT_LOAD) {
847 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
849 unsigned long vaddr = 0;
852 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
853 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
854 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
855 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
856 elf_type |= MAP_FIXED;
857 vaddr = eppnt->p_vaddr;
859 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
860 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
864 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
866 if (error > -1024UL) {
868 close(interpreter_fd);
873 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
879 * Find the end of the file mapping for this phdr, and keep
880 * track of the largest address we see for this.
882 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
883 if (k > elf_bss) elf_bss = k;
886 * Do the same thing for the memory mapping - between
887 * elf_bss and last_bss is the bss section.
889 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
890 if (k > last_bss) last_bss = k;
893 /* Now use mmap to map the library into memory. */
895 close(interpreter_fd);
898 * Now fill out the bss section. First pad the last page up
899 * to the page boundary, and then perform a mmap to make sure
900 * that there are zeromapped pages up to and including the last
904 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
906 /* Map the last of the bss segment */
907 if (last_bss > elf_bss) {
908 target_mmap(elf_bss, last_bss-elf_bss,
909 PROT_READ|PROT_WRITE|PROT_EXEC,
910 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
914 *interp_load_addr = load_addr;
915 return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
918 /* Best attempt to load symbols from this ELF object. */
/* Scan the section headers for SHT_SYMTAB, then read the symbol table
   and its string table into memory for the disassembler.
   NOTE(review): `s = malloc(sizeof(*s))` is dereferenced without a NULL
   check — only its members are checked afterwards; worth fixing once
   the full function is visible. */
919 static void load_symbols(struct elfhdr *hdr, int fd)
922 struct elf_shdr sechdr, symtab, strtab;
926 lseek(fd, hdr->e_shoff, SEEK_SET);
927 for (i = 0; i < hdr->e_shnum; i++) {
928 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
933 if (sechdr.sh_type == SHT_SYMTAB) {
/* sh_link of the symtab section indexes its string table section. */
935 lseek(fd, hdr->e_shoff
936 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
937 if (read(fd, &strtab, sizeof(strtab))
946 return; /* Shouldn't happen... */
949 /* Now know where the strtab and symtab are. Snarf them. */
950 s = malloc(sizeof(*s));
951 s->disas_symtab = malloc(symtab.sh_size);
952 s->disas_strtab = strings = malloc(strtab.sh_size);
953 if (!s->disas_symtab || !s->disas_strtab)
956 lseek(fd, symtab.sh_offset, SEEK_SET);
957 if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
961 for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++)
962 bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);
965 lseek(fd, strtab.sh_offset, SEEK_SET);
966 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
968 s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
/* Core ELF loader: validate the headers in bprm->buf, read the program
   headers, locate and open any PT_INTERP interpreter, set up the arg
   stack, map all PT_LOAD segments (with ET_DYN relocation via a big
   anonymous reservation), load the interpreter, build the ELF tables,
   and fill in *info and *regs for the new image.
   NOTE(review): many lines (error exits, brace closures, some loop
   bodies) are elided in this listing. */
973 static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
974 struct image_info * info)
976 struct elfhdr elf_ex;
977 struct elfhdr interp_elf_ex;
978 struct exec interp_ex;
979 int interpreter_fd = -1; /* avoid warning */
980 unsigned long load_addr, load_bias;
981 int load_addr_set = 0;
982 unsigned int interpreter_type = INTERPRETER_NONE;
983 unsigned char ibcs2_interpreter;
985 unsigned long mapped_addr;
986 struct elf_phdr * elf_ppnt;
987 struct elf_phdr *elf_phdata;
988 unsigned long elf_bss, k, elf_brk;
990 char * elf_interpreter;
991 unsigned long elf_entry, interp_load_addr = 0;
993 unsigned long start_code, end_code, end_data;
994 unsigned long elf_stack;
995 char passed_fileno[6];
997 ibcs2_interpreter = 0;
1001 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1003 bswap_ehdr(&elf_ex);
/* Magic-number check: 0x7f 'E' 'L' 'F'. */
1006 if (elf_ex.e_ident[0] != 0x7f ||
1007 strncmp(&elf_ex.e_ident[1], "ELF",3) != 0) {
1011 /* First of all, some simple consistency checks */
1012 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1013 (! elf_check_arch(elf_ex.e_machine))) {
1017 /* Now read in all of the header information */
1018 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1019 if (elf_phdata == NULL) {
1023 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1025 retval = read(bprm->fd, (char *) elf_phdata,
1026 elf_ex.e_phentsize * elf_ex.e_phnum);
1030 perror("load_elf_binary");
1037 elf_ppnt = elf_phdata;
1038 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1039 bswap_phdr(elf_ppnt);
1042 elf_ppnt = elf_phdata;
1049 elf_interpreter = NULL;
/* Pass 1 over the phdrs: find the PT_INTERP segment, if any. */
1054 for(i = 0;i < elf_ex.e_phnum; i++) {
1055 if (elf_ppnt->p_type == PT_INTERP) {
1056 if ( elf_interpreter != NULL )
1059 free(elf_interpreter);
1064 /* This is the program interpreter used for
1065 * shared libraries - for now assume that this
1066 * is an a.out format binary
1069 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1071 if (elf_interpreter == NULL) {
1077 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1079 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1082 perror("load_elf_binary2");
1086 /* If the program interpreter is one of these two,
1087 then assume an iBCS2 image. Otherwise assume
1088 a native linux image. */
1090 /* JRP - Need to add X86 lib dir stuff here... */
1092 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1093 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1094 ibcs2_interpreter = 1;
1098 printf("Using ELF interpreter %s\n", elf_interpreter);
1101 retval = open(path(elf_interpreter), O_RDONLY);
1103 interpreter_fd = retval;
1106 perror(elf_interpreter);
1108 /* retval = -errno; */
/* Read the interpreter's first 128 bytes for format sniffing. */
1113 retval = lseek(interpreter_fd, 0, SEEK_SET);
1115 retval = read(interpreter_fd,bprm->buf,128);
1119 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1120 interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
1123 perror("load_elf_binary3");
1126 free(elf_interpreter);
1134 /* Some simple consistency checks for the interpreter */
1135 if (elf_interpreter){
1136 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1138 /* Now figure out which format our binary is */
1139 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1140 (N_MAGIC(interp_ex) != QMAGIC)) {
1141 interpreter_type = INTERPRETER_ELF;
1144 if (interp_elf_ex.e_ident[0] != 0x7f ||
1145 strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1146 interpreter_type &= ~INTERPRETER_ELF;
1149 if (!interpreter_type) {
1150 free(elf_interpreter);
1157 /* OK, we are done with that, now set up the arg stuff,
1158 and then start this sucker up */
1160 if (!bprm->sh_bang) {
1163 if (interpreter_type == INTERPRETER_AOUT) {
1164 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1165 passed_p = passed_fileno;
1167 if (elf_interpreter) {
1168 bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p);
1173 if (elf_interpreter) {
1174 free(elf_interpreter);
1182 /* OK, This is the point of no return */
1185 info->start_mmap = (unsigned long)ELF_START_MMAP;
1187 elf_entry = (unsigned long) elf_ex.e_entry;
1189 /* Do this so that we can load the interpreter, if need be. We will
1190 change some of these later */
1192 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1193 info->start_stack = bprm->p;
1195 /* Now we do a little grungy work by mmaping the ELF image into
1196 * the correct location in memory. At this point, we assume that
1197 * the image should be loaded at fixed address, not at a variable
/* Pass 2 over the phdrs: map each PT_LOAD segment. */
1201 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1204 unsigned long error;
1206 if (elf_ppnt->p_type != PT_LOAD)
1209 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1210 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1211 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1212 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1213 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1214 elf_flags |= MAP_FIXED;
1215 } else if (elf_ex.e_type == ET_DYN) {
1216 /* Try and get dynamic programs out of the way of the default mmap
1217 base, as well as whatever program they might try to exec. This
1218 is because the brk will follow the loader, and is not movable. */
1219 /* NOTE: for qemu, we do a big mmap to get enough space
1220 without hardcoding any address */
1221 error = target_mmap(0, ET_DYN_MAP_SIZE,
1222 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1228 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1231 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1232 (elf_ppnt->p_filesz +
1233 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1235 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1237 (elf_ppnt->p_offset -
1238 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1244 #ifdef LOW_ELF_STACK
1245 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1246 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1249 if (!load_addr_set) {
1251 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1252 if (elf_ex.e_type == ET_DYN) {
1253 load_bias += error -
1254 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1255 load_addr += load_bias;
/* Track code/data/bss extents across segments. */
1258 k = elf_ppnt->p_vaddr;
1261 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1264 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1268 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1269 if (k > elf_brk) elf_brk = k;
/* Relocate all recorded addresses by the chosen load bias. */
1272 elf_entry += load_bias;
1273 elf_bss += load_bias;
1274 elf_brk += load_bias;
1275 start_code += load_bias;
1276 end_code += load_bias;
1277 // start_data += load_bias;
1278 end_data += load_bias;
1280 if (elf_interpreter) {
1281 if (interpreter_type & 1) {
1282 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1284 else if (interpreter_type & 2) {
1285 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1289 close(interpreter_fd);
1290 free(elf_interpreter);
1292 if (elf_entry == ~0UL) {
1293 printf("Unable to load interpreter\n");
1303 load_symbols(&elf_ex, bprm->fd);
1305 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1306 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1308 #ifdef LOW_ELF_STACK
1309 info->start_stack = bprm->p = elf_stack - 4;
1311 bprm->p = (unsigned long)
1312 create_elf_tables((char *)bprm->p,
1316 load_addr, load_bias,
1318 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1320 if (interpreter_type == INTERPRETER_AOUT)
1321 info->arg_start += strlen(passed_fileno) + 1;
1322 info->start_brk = info->brk = elf_brk;
1323 info->end_code = end_code;
1324 info->start_code = start_code;
1325 info->end_data = end_data;
1326 info->start_stack = bprm->p;
1328 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1330 set_brk(elf_bss, elf_brk);
1335 printf("(start_brk) %x\n" , info->start_brk);
1336 printf("(end_code) %x\n" , info->end_code);
1337 printf("(start_code) %x\n" , info->start_code);
1338 printf("(end_data) %x\n" , info->end_data);
1339 printf("(start_stack) %x\n" , info->start_stack);
1340 printf("(brk) %x\n" , info->brk);
1343 if ( info->personality == PER_SVR4 )
1345 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1346 and some applications "depend" upon this behavior.
1347 Since we do not have the power to recompile these, we
1348 emulate the SVr4 behavior. Sigh. */
1349 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1350 MAP_FIXED | MAP_PRIVATE, -1, 0);
1353 #ifdef ELF_PLAT_INIT
1355 * The ABI may specify that certain registers be set up in special
1356 * ways (on i386 %edx is the address of a DT_FINI function, for
1357 * example. This macro performs whatever initialization to
1358 * the regs structure is required.
1360 ELF_PLAT_INIT(regs);
1364 info->entry = elf_entry;
/* Top-level exec entry point: open the binary, gather credentials and
   the first 128 bytes (prepare_binprm), copy filename/env/args into the
   arg pages, hand off to load_elf_binary(), and on success initialize
   the CPU registers for the new image.
   NOTE(review): several lines (declarations, error returns) are elided. */
1371 int elf_exec(const char * filename, char ** argv, char ** envp,
1372 struct target_pt_regs * regs, struct image_info *infop)
1374 struct linux_binprm bprm;
/* Start p at the top of the arg area, leaving room for argc. */
1378 bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
1379 for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */
1381 retval = open(filename, O_RDONLY);
1385 bprm.filename = (char *)filename;
1390 bprm.argc = count(argv);
1391 bprm.envc = count(envp);
1393 retval = prepare_binprm(&bprm);
1396 bprm.p = copy_strings(1, &bprm.filename, bprm.page, bprm.p);
1398 bprm.p = copy_strings(bprm.envc,envp,bprm.page,bprm.p);
1399 bprm.p = copy_strings(bprm.argc,argv,bprm.page,bprm.p);
1406 retval = load_elf_binary(&bprm,regs,infop);
1409 /* success. Initialize important registers */
1410 init_thread(regs, infop);
1414 /* Something went wrong, return the inode and free the argument pages*/
1415 for (i=0 ; i<MAX_ARG_PAGES ; i++) {
1416 free_page((void *)bprm.page[i]);
/* Stub: a.out (OMAGIC/ZMAGIC/QMAGIC) interpreters are not supported. */
1422 static int load_aout_interp(void * exptr, int interp_fd)
1424 printf("a.out interpreter not yet supported\n");