1 /* This is the Linux kernel elf-loading code, ported into user space */
17 #define ELF_START_MMAP 0x80000000
19 typedef uint32_t elf_greg_t;
21 #define ELF_NGREG (sizeof (struct target_pt_regs) / sizeof(elf_greg_t))
22 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
24 typedef struct user_i387_struct elf_fpregset_t;
27 * This is used to ensure we don't load something for the wrong architecture.
29 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
32 * These are used to set parameters in the core dumps.
34 #define ELF_CLASS ELFCLASS32
35 #define ELF_DATA ELFDATA2LSB
36 #define ELF_ARCH EM_386
38 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
39 starts %edx contains a pointer to a function which might be
40 registered using `atexit'. This provides a means for the
41 dynamic linker to call DT_FINI functions for shared libraries
42 that have been loaded before the code runs.
44 A value of 0 tells we have no such handler. */
45 #define ELF_PLAT_INIT(_r) _r->edx = 0
47 #define USE_ELF_CORE_DUMP
48 #define ELF_EXEC_PAGESIZE 4096
55 * MAX_ARG_PAGES defines the number of pages allocated for arguments
56 * and envelope for the new program. 32 should suffice, this gives
57 * a maximum env+arg of 128kB w/4KB pages!
59 #define MAX_ARG_PAGES 32
62 * This structure is used to hold the arguments that are
63 * used when loading binaries.
67 unsigned long page[MAX_ARG_PAGES];
73 char * filename; /* Name of binary */
74 unsigned long loader, exec;
75 int dont_iput; /* binfmt handler has put inode */
80 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
81 unsigned int a_text; /* length of text, in bytes */
82 unsigned int a_data; /* length of data, in bytes */
83 unsigned int a_bss; /* length of uninitialized data area, in bytes */
84 unsigned int a_syms; /* length of symbol table data in file, in bytes */
85 unsigned int a_entry; /* start address */
86 unsigned int a_trsize; /* length of relocation info for text, in bytes */
87 unsigned int a_drsize; /* length of relocation info for data, in bytes */
91 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
97 #define X86_STACK_TOP 0x7d000000
99 /* max code+data+bss space allocated to elf interpreter */
100 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
102 /* max code+data+bss+brk space allocated to ET_DYN executables */
103 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
/* Subset of <linux/personality.h>, inlined so this user-space port
 * does not depend on kernel headers.  The personality value selects
 * per-OS emulation quirks (SVR4, SCO, Xenix, ...). */
105 /* from personality.h */
107 /* Flags for bug emulation. These occupy the top three bytes. */
108 #define STICKY_TIMEOUTS 0x4000000
109 #define WHOLE_SECONDS 0x2000000
111 /* Personality types. These go in the low byte. Avoid using the top bit,
112 * it will conflict with error returns.
114 #define PER_MASK (0x00ff)
115 #define PER_LINUX (0x0000)
116 #define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
117 #define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
118 #define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
119 #define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
120 #define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
121 #define PER_BSD (0x0006)
122 #define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
/* Host (alpha) and target (x86) page geometry.  *_PAGE_ALIGN rounds
 * an address UP to the next page boundary; *_ELF_PAGESTART rounds
 * DOWN; *_ELF_PAGEOFFSET gives the offset within the page. */
124 /* Necessary parameters */
125 #define ALPHA_PAGE_SIZE 4096
126 #define X86_PAGE_SIZE 4096
128 #define ALPHA_PAGE_MASK (~(ALPHA_PAGE_SIZE-1))
129 #define X86_PAGE_MASK (~(X86_PAGE_SIZE-1))
131 #define ALPHA_PAGE_ALIGN(addr) ((((addr)+ALPHA_PAGE_SIZE)-1)&ALPHA_PAGE_MASK)
132 #define X86_PAGE_ALIGN(addr) ((((addr)+X86_PAGE_SIZE)-1)&X86_PAGE_MASK)
136 #define X86_ELF_EXEC_PAGESIZE X86_PAGE_SIZE
137 #define X86_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(X86_ELF_EXEC_PAGESIZE-1))
138 #define X86_ELF_PAGEOFFSET(_v) ((_v) & (X86_ELF_EXEC_PAGESIZE-1))
140 #define ALPHA_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ALPHA_PAGE_SIZE-1))
141 #define ALPHA_ELF_PAGEOFFSET(_v) ((_v) & (ALPHA_PAGE_SIZE-1))
/* Interpreter kinds recorded while parsing PT_INTERP. */
143 #define INTERPRETER_NONE 0
144 #define INTERPRETER_AOUT 1
145 #define INTERPRETER_ELF 2
147 #define DLINFO_ITEMS 12
147 #define DLINFO_ITEMS 12
/* Minimal user-access shims: in this user-space port "user" pointers
 * are directly dereferenceable, so these reduce to plain reads and
 * writes cast to the pointee type.
 *
 * Fixes over the original:
 *  - every use of the ptr argument is parenthesized; the original
 *    `typeof(*ptr)` made e.g. get_user(p + i) expand to typeof(*p + i),
 *    silently computing the type of the wrong expression;
 *  - __typeof__ instead of typeof, so the macros also compile under
 *    strict ISO modes (-std=c11) where the plain `typeof` GNU keyword
 *    is unavailable;
 *  - the whole expansion is parenthesized to avoid precedence
 *    surprises at the use site.
 */
#define put_user(x,ptr) ((void)(*(ptr) = (__typeof__(*(ptr)))(x)))
#define get_user(ptr) ((__typeof__(*(ptr)))(*(ptr)))
152 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
157 static inline void memcpy_tofs(void * to, const void * from, unsigned long n)
162 //extern void * mmap4k();
163 #define mmap4k(a, b, c, d, e, f) mmap((void *)(a), b, c, d, e, f)
165 extern unsigned long x86_stack_size;
167 static int load_aout_interp(void * exptr, int interp_fd);
/* Byte-swap every multi-byte field of a 32-bit ELF file header in
 * place.  Used when the target image's endianness differs from the
 * host's (bswap16s/bswap32s swap their operand in place). */
170 static void bswap_ehdr(Elf32_Ehdr *ehdr)
172 bswap16s(&ehdr->e_type); /* Object file type */
173 bswap16s(&ehdr->e_machine); /* Architecture */
174 bswap32s(&ehdr->e_version); /* Object file version */
175 bswap32s(&ehdr->e_entry); /* Entry point virtual address */
176 bswap32s(&ehdr->e_phoff); /* Program header table file offset */
177 bswap32s(&ehdr->e_shoff); /* Section header table file offset */
178 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
179 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
180 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
181 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
182 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
183 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
184 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
/* Byte-swap every field of a 32-bit ELF program header in place
 * (all Elf32_Phdr fields are 32-bit). */
187 static void bswap_phdr(Elf32_Phdr *phdr)
189 bswap32s(&phdr->p_type); /* Segment type */
190 bswap32s(&phdr->p_offset); /* Segment file offset */
191 bswap32s(&phdr->p_vaddr); /* Segment virtual address */
192 bswap32s(&phdr->p_paddr); /* Segment physical address */
193 bswap32s(&phdr->p_filesz); /* Segment size in file */
194 bswap32s(&phdr->p_memsz); /* Segment size in memory */
195 bswap32s(&phdr->p_flags); /* Segment flags */
196 bswap32s(&phdr->p_align); /* Segment alignment */
/* Allocate one zero-filled, page-aligned page via anonymous mmap.
 * On mmap failure the error is reported with perror().
 * NOTE(review): the return statements are not visible here —
 * presumably NULL on failure and the mapped page on success; confirm
 * against the full source. */
200 static void * get_free_page(void)
204 /* User-space version of kernel get_free_page. Returns a page-aligned
205 * page-sized chunk of memory.
207 retval = mmap4k(0, ALPHA_PAGE_SIZE, PROT_READ|PROT_WRITE,
208 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
210 if((long)retval == -1) {
211 perror("get_free_page");
/* Release a page previously obtained from get_free_page().  The
 * munmap result is deliberately ignored (best-effort cleanup). */
219 static void free_page(void * pageaddr)
221 (void)munmap(pageaddr, ALPHA_PAGE_SIZE);
225 * 'copy_strings()' copies argument/envelope strings from user
226 * memory to free pages in kernel mem. These are in a format ready
227 * to be put directly into the top of new user memory.
/* Copy argc NUL-terminated strings from the argv-style vector into
 * the argument pages, filling them top-down starting just below
 * offset p.  New pages are allocated on demand via get_free_page().
 * Returns the new (lower) offset p; returns 0 as bullet-proofing for
 * argc < 0.  Strings are walked backwards (argv[argc-1] first) so
 * they end up laid out ready for the new process stack. */
230 static unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
233 char *tmp, *tmp1, *pag = NULL;
237 return 0; /* bullet-proofing */
240 if (!(tmp1 = tmp = get_user(argv+argc))) {
241 fprintf(stderr, "VFS: argc is wrong");
/* Find the string's terminating NUL; len = strlen + 1. */
244 while (get_user(tmp++));
246 if (p < len) { /* this shouldn't happen - 128kB */
/* Locate (and if needed allocate) the page holding offset p. */
252 offset = p % X86_PAGE_SIZE;
253 if (!(pag = (char *) page[p/X86_PAGE_SIZE]) &&
254 !(pag = (char *) page[p/X86_PAGE_SIZE] =
255 (unsigned long *) get_free_page())) {
/* Either copy one byte at a time across a page boundary, or bulk
 * copy as much of the string as fits in this page. */
259 if (len == 0 || offset == 0) {
260 *(pag + offset) = get_user(tmp);
263 int bytes_to_copy = (len > offset) ? offset : len;
264 tmp -= bytes_to_copy;
266 offset -= bytes_to_copy;
267 len -= bytes_to_copy;
268 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
/* Return TRUE iff gid g is among the caller's supplementary groups
 * as reported by getgroups().  Used for the setgid permission check
 * in prepare_binprm(). */
275 static int in_group_p(gid_t g)
277 /* return TRUE if we're in the specified group, FALSE otherwise */
280 gid_t grouplist[NGROUPS];
282 ngroup = getgroups(NGROUPS, grouplist);
283 for(i = 0; i < ngroup; i++) {
284 if(grouplist[i] == g) {
/* Count the entries of a NULL-terminated pointer vector (argv/envp). */
291 static int count(char ** vec)
295 for(i = 0; *vec; i++) {
/* Validate the binary's file mode, compute the effective uid/gid the
 * new image would run with (honouring setuid/setgid bits), and read
 * the first 128 bytes of the file into bprm->buf for format
 * sniffing.  Mirrors the kernel's prepare_binprm(). */
302 static int prepare_binprm(struct linux_binprm *bprm)
306 int retval, id_change;
308 if(fstat(bprm->fd, &st) < 0) {
313 if(!S_ISREG(mode)) { /* Must be regular file */
316 if(!(mode & 0111)) { /* Must have at least one execute bit set */
/* Default credentials: the caller's effective ids. */
320 bprm->e_uid = geteuid();
321 bprm->e_gid = getegid();
/* setuid: image would run as the file owner. */
326 bprm->e_uid = st.st_uid;
327 if(bprm->e_uid != geteuid()) {
334 * If setgid is set but no group execute bit then this
335 * is a candidate for mandatory locking, not a setgid
338 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
339 bprm->e_gid = st.st_gid;
340 if (!in_group_p(bprm->e_gid)) {
/* Slurp the header bytes used by the binfmt probes. */
345 memset(bprm->buf, 0, sizeof(bprm->buf));
346 retval = lseek(bprm->fd, 0L, SEEK_SET);
348 retval = read(bprm->fd, bprm->buf, 128);
351 perror("prepare_binprm");
353 /* return(-errno); */
/* Map the new program's stack (x86_stack_size, at least big enough
 * for MAX_ARG_PAGES), protect one extra guard page above it, and
 * copy the previously filled argument pages to the top of the new
 * stack, freeing them as we go.  Relocates bprm->loader/exec by
 * stack_base.  NOTE(review): the return statement is elided here —
 * presumably p rebased onto the new stack; confirm in full source. */
360 unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
361 struct image_info * info)
363 unsigned long stack_base, size, error;
366 /* Create enough stack to hold everything. If we don't use
367 * it for args, we'll use it for something else...
369 size = x86_stack_size;
370 if (size < MAX_ARG_PAGES*X86_PAGE_SIZE)
371 size = MAX_ARG_PAGES*X86_PAGE_SIZE;
372 error = (unsigned long)mmap4k(NULL,
373 size + X86_PAGE_SIZE,
374 PROT_READ | PROT_WRITE,
375 MAP_PRIVATE | MAP_ANONYMOUS,
381 /* we reserve one extra page at the top of the stack as guard */
382 mprotect((void *)(error + size), X86_PAGE_SIZE, PROT_NONE);
384 stack_base = error + size - MAX_ARG_PAGES*X86_PAGE_SIZE;
388 bprm->loader += stack_base;
390 bprm->exec += stack_base;
/* Move the argument pages into place at the stack top. */
392 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
396 memcpy((void *)stack_base, (void *)bprm->page[i], X86_PAGE_SIZE);
397 free_page((void *)bprm->page[i]);
399 stack_base += X86_PAGE_SIZE;
/* Map anonymous zeroed pages covering [start, end) (both rounded up
 * to page boundaries) — this materialises the bss/brk region.
 * Failure is reported but NOTE(review): execution appears to
 * continue; the recovery path is not visible in this excerpt. */
404 static void set_brk(unsigned long start, unsigned long end)
406 /* page-align the start and end addresses... */
407 start = ALPHA_PAGE_ALIGN(start);
408 end = ALPHA_PAGE_ALIGN(end);
411 if((long)mmap4k(start, end - start,
412 PROT_READ | PROT_WRITE | PROT_EXEC,
413 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
414 perror("cannot mmap brk");
420 /* We need to explicitly zero any fractional pages
421 after the data section (i.e. bss). This would
422 contain the junk from the file that should not
/* Zero the tail of the last file-backed page, from elf_bss up to the
 * next page boundary (no-op when elf_bss is already page-aligned). */
426 static void padzero(unsigned long elf_bss)
431 nbyte = elf_bss & (ALPHA_PAGE_SIZE-1); /* was X86_PAGE_SIZE - JRP */
433 nbyte = ALPHA_PAGE_SIZE - nbyte;
434 fpnt = (char *) elf_bss;
/* Build the initial process stack image: argc, argv[] pointers,
 * envp[] pointers, and (for dynamically linked images) the ELF
 * auxiliary vector consumed by the interpreter.  p points just past
 * the strings copied by copy_strings(); returns the final stack
 * pointer.  Also records arg/env boundaries in *info.  All values
 * are stored target-endian via tswapl(). */
441 static unsigned int * create_elf_tables(char *p, int argc, int envc,
442 struct elfhdr * exec,
443 unsigned long load_addr,
444 unsigned long load_bias,
445 unsigned long interp_load_addr, int ibcs,
446 struct image_info *info)
448 target_ulong *argv, *envp, *dlinfo;
452 * Force 16 byte alignment here for generality.
454 sp = (unsigned int *) (~15UL & (unsigned long) p);
455 sp -= exec ? DLINFO_ITEMS*2 : 2;
462 put_user(tswapl((target_ulong)envp),--sp);
463 put_user(tswapl((target_ulong)argv),--sp);
/* NOTE(review): NEW_AUX_ENT is a multi-statement macro without a
 * do { } while (0) wrapper — safe as used below, but fragile if ever
 * placed under an unbraced if. */
466 #define NEW_AUX_ENT(id, val) \
467 put_user (tswapl(id), dlinfo++); \
468 put_user (tswapl(val), dlinfo++)
470 if (exec) { /* Put this here for an ELF program interpreter */
471 NEW_AUX_ENT (AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
472 NEW_AUX_ENT (AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
473 NEW_AUX_ENT (AT_PHNUM, (target_ulong)(exec->e_phnum));
474 NEW_AUX_ENT (AT_PAGESZ, (target_ulong)(ALPHA_PAGE_SIZE));
475 NEW_AUX_ENT (AT_BASE, (target_ulong)(interp_load_addr));
476 NEW_AUX_ENT (AT_FLAGS, (target_ulong)0);
477 NEW_AUX_ENT (AT_ENTRY, load_bias + exec->e_entry);
478 NEW_AUX_ENT (AT_UID, (target_ulong) getuid());
479 NEW_AUX_ENT (AT_EUID, (target_ulong) geteuid());
480 NEW_AUX_ENT (AT_GID, (target_ulong) getgid());
481 NEW_AUX_ENT (AT_EGID, (target_ulong) getegid());
483 NEW_AUX_ENT (AT_NULL, 0);
485 put_user(tswapl(argc),--sp);
/* Fill in argv pointers by walking the packed strings. */
486 info->arg_start = (unsigned int)((unsigned long)p & 0xffffffff);
488 put_user(tswapl((target_ulong)p),argv++);
489 while (get_user(p++)) /* nothing */ ;
492 info->arg_end = info->env_start = (unsigned int)((unsigned long)p & 0xffffffff);
494 put_user(tswapl((target_ulong)p),envp++);
495 while (get_user(p++)) /* nothing */ ;
498 info->env_end = (unsigned int)((unsigned long)p & 0xffffffff);
/* Map the ELF program interpreter (dynamic linker) whose header is
 * in *interp_elf_ex, reading its program headers from
 * interpreter_fd.  For ET_DYN interpreters a large PROT_NONE region
 * is reserved first so no fixed load address need be assumed.  Sets
 * *interp_load_addr and returns the interpreter's (relocated) entry
 * point; error paths return through code elided from this excerpt. */
504 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
506 unsigned long *interp_load_addr)
508 struct elf_phdr *elf_phdata = NULL;
509 struct elf_phdr *eppnt;
510 unsigned long load_addr = 0;
511 int load_addr_set = 0;
513 unsigned long last_bss, elf_bss;
522 bswap_ehdr(interp_elf_ex);
524 /* First of all, some simple consistency checks */
525 if ((interp_elf_ex->e_type != ET_EXEC &&
526 interp_elf_ex->e_type != ET_DYN) ||
527 !elf_check_arch(interp_elf_ex->e_machine)) {
532 /* Now read in all of the header information */
534 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > X86_PAGE_SIZE)
537 elf_phdata = (struct elf_phdr *)
538 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
544 * If the size of this structure has changed, then punt, since
545 * we will be doing the wrong thing.
547 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
552 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
554 retval = read(interpreter_fd,
556 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
559 perror("load_elf_interp");
/* Byte-swap the program headers to host order. */
566 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
571 if (interp_elf_ex->e_type == ET_DYN) {
572 /* in order to avoid hardcoding the interpreter load
573 address in qemu, we allocate a big enough memory zone */
574 error = (unsigned long)mmap4k(NULL, INTERP_MAP_SIZE,
575 PROT_NONE, MAP_PRIVATE | MAP_ANON,
/* Map each PT_LOAD segment at its (possibly rebased) address. */
586 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
587 if (eppnt->p_type == PT_LOAD) {
588 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
590 unsigned long vaddr = 0;
593 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
594 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
595 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
596 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
597 elf_type |= MAP_FIXED;
598 vaddr = eppnt->p_vaddr;
600 error = (unsigned long)mmap4k(load_addr+X86_ELF_PAGESTART(vaddr),
601 eppnt->p_filesz + X86_ELF_PAGEOFFSET(eppnt->p_vaddr),
605 eppnt->p_offset - X86_ELF_PAGEOFFSET(eppnt->p_vaddr));
/* mmap returns values in (-4096, 0) range on failure; treat any
 * "address" in the top 1K page as an error. */
607 if (error > -1024UL) {
609 close(interpreter_fd);
614 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
620 * Find the end of the file mapping for this phdr, and keep
621 * track of the largest address we see for this.
623 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
624 if (k > elf_bss) elf_bss = k;
627 * Do the same thing for the memory mapping - between
628 * elf_bss and last_bss is the bss section.
630 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
631 if (k > last_bss) last_bss = k;
634 /* Now use mmap to map the library into memory. */
636 close(interpreter_fd);
639 * Now fill out the bss section. First pad the last page up
640 * to the page boundary, and then perform a mmap to make sure
641 * that there are zeromapped pages up to and including the last
645 elf_bss = X86_ELF_PAGESTART(elf_bss + ALPHA_PAGE_SIZE - 1); /* What we have mapped so far */
647 /* Map the last of the bss segment */
648 if (last_bss > elf_bss) {
649 mmap4k(elf_bss, last_bss-elf_bss,
650 PROT_READ|PROT_WRITE|PROT_EXEC,
651 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
655 *interp_load_addr = load_addr;
656 return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
/* Core loader: validate the ELF header in bprm->buf, read and
 * byte-swap the program headers, locate and open any PT_INTERP
 * interpreter (a.out or ELF), set up the argument stack, mmap every
 * PT_LOAD segment (reserving a large region first for ET_DYN so no
 * address is hardcoded), load the interpreter, build the stack
 * tables/auxv, map the bss/brk, and fill in *info and the initial
 * register state.  Mirrors the kernel's load_elf_binary(). */
661 static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
662 struct image_info * info)
664 struct elfhdr elf_ex;
665 struct elfhdr interp_elf_ex;
666 struct exec interp_ex;
667 int interpreter_fd = -1; /* avoid warning */
668 unsigned long load_addr, load_bias;
669 int load_addr_set = 0;
670 unsigned int interpreter_type = INTERPRETER_NONE;
671 unsigned char ibcs2_interpreter;
674 struct elf_phdr * elf_ppnt;
675 struct elf_phdr *elf_phdata;
676 unsigned long elf_bss, k, elf_brk;
678 char * elf_interpreter;
679 unsigned long elf_entry, interp_load_addr = 0;
681 unsigned long start_code, end_code, end_data;
682 unsigned long elf_stack;
683 char passed_fileno[6];
685 ibcs2_interpreter = 0;
689 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
/* Check the ELF magic: 0x7f 'E' 'L' 'F'. */
694 if (elf_ex.e_ident[0] != 0x7f ||
695 strncmp(&elf_ex.e_ident[1], "ELF",3) != 0) {
699 /* First of all, some simple consistency checks */
700 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
701 (! elf_check_arch(elf_ex.e_machine))) {
705 /* Now read in all of the header information */
707 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
708 if (elf_phdata == NULL) {
712 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
714 retval = read(bprm->fd, (char *) elf_phdata,
715 elf_ex.e_phentsize * elf_ex.e_phnum);
719 perror("load_elf_binary");
/* Swap all program headers to host byte order. */
726 elf_ppnt = elf_phdata;
727 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
728 bswap_phdr(elf_ppnt);
731 elf_ppnt = elf_phdata;
738 elf_interpreter = NULL;
/* Scan for a PT_INTERP segment naming the program interpreter. */
743 for(i=0;i < elf_ex.e_phnum; i++) {
744 if (elf_ppnt->p_type == PT_INTERP) {
745 if ( elf_interpreter != NULL )
748 free(elf_interpreter);
753 /* This is the program interpreter used for
754 * shared libraries - for now assume that this
755 * is an a.out format binary
758 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
760 if (elf_interpreter == NULL) {
766 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
768 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
771 perror("load_elf_binary2");
775 /* If the program interpreter is one of these two,
776 then assume an iBCS2 image. Otherwise assume
777 a native linux image. */
779 /* JRP - Need to add X86 lib dir stuff here... */
781 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
782 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
783 ibcs2_interpreter = 1;
787 printf("Using ELF interpreter %s\n", elf_interpreter);
790 retval = open(path(elf_interpreter), O_RDONLY);
792 interpreter_fd = retval;
795 perror(elf_interpreter);
797 /* retval = -errno; */
/* Read the interpreter's header so we can tell a.out from ELF. */
802 retval = lseek(interpreter_fd, 0, SEEK_SET);
804 retval = read(interpreter_fd,bprm->buf,128);
808 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
809 interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
812 perror("load_elf_binary3");
815 free(elf_interpreter);
823 /* Some simple consistency checks for the interpreter */
824 if (elf_interpreter){
825 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
827 /* Now figure out which format our binary is */
828 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
829 (N_MAGIC(interp_ex) != QMAGIC)) {
830 interpreter_type = INTERPRETER_ELF;
833 if (interp_elf_ex.e_ident[0] != 0x7f ||
834 strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
835 interpreter_type &= ~INTERPRETER_ELF;
838 if (!interpreter_type) {
839 free(elf_interpreter);
846 /* OK, we are done with that, now set up the arg stuff,
847 and then start this sucker up */
849 if (!bprm->sh_bang) {
/* a.out interpreters are passed the executable's fd as a string. */
852 if (interpreter_type == INTERPRETER_AOUT) {
853 sprintf(passed_fileno, "%d", bprm->fd);
854 passed_p = passed_fileno;
856 if (elf_interpreter) {
857 bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p);
862 if (elf_interpreter) {
863 free(elf_interpreter);
871 /* OK, This is the point of no return */
874 info->start_mmap = (unsigned long)ELF_START_MMAP;
876 elf_entry = (unsigned long) elf_ex.e_entry;
878 /* Do this so that we can load the interpreter, if need be. We will
879 change some of these later */
881 bprm->p = setup_arg_pages(bprm->p, bprm, info);
882 info->start_stack = bprm->p;
884 /* Now we do a little grungy work by mmaping the ELF image into
885 * the correct location in memory. At this point, we assume that
886 * the image should be loaded at fixed address, not at a variable
890 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
895 if (elf_ppnt->p_type != PT_LOAD)
898 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
899 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
900 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
901 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
902 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
903 elf_flags |= MAP_FIXED;
904 } else if (elf_ex.e_type == ET_DYN) {
905 /* Try and get dynamic programs out of the way of the default mmap
906 base, as well as whatever program they might try to exec. This
907 is because the brk will follow the loader, and is not movable. */
908 /* NOTE: for qemu, we do a big mmap to get enough space
909 without hardcoding any address */
910 error = (unsigned long)mmap4k(NULL, ET_DYN_MAP_SIZE,
911 PROT_NONE, MAP_PRIVATE | MAP_ANON,
917 load_bias = X86_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
920 error = (unsigned long)mmap4k(
921 X86_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
922 (elf_ppnt->p_filesz +
923 X86_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
925 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
927 (elf_ppnt->p_offset -
928 X86_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
/* Track the lowest mapped segment to bound the stack region. */
935 if (X86_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
936 elf_stack = X86_ELF_PAGESTART(elf_ppnt->p_vaddr);
939 if (!load_addr_set) {
941 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
942 if (elf_ex.e_type == ET_DYN) {
944 X86_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
945 load_addr += load_bias;
948 k = elf_ppnt->p_vaddr;
951 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
954 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
958 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
959 if (k > elf_brk) elf_brk = k;
/* Rebase all recorded addresses by the ET_DYN load bias. */
962 elf_entry += load_bias;
963 elf_bss += load_bias;
964 elf_brk += load_bias;
965 start_code += load_bias;
966 end_code += load_bias;
967 // start_data += load_bias;
968 end_data += load_bias;
970 if (elf_interpreter) {
971 if (interpreter_type & 1) {
972 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
974 else if (interpreter_type & 2) {
975 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
979 close(interpreter_fd);
980 free(elf_interpreter);
982 if (elf_entry == ~0UL) {
983 printf("Unable to load interpreter\n");
992 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
993 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
996 info->start_stack = bprm->p = elf_stack - 4;
998 bprm->p = (unsigned long)
999 create_elf_tables((char *)bprm->p,
1002 (interpreter_type == INTERPRETER_ELF ? &elf_ex : NULL),
1003 load_addr, load_bias,
1005 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1007 if (interpreter_type == INTERPRETER_AOUT)
1008 info->arg_start += strlen(passed_fileno) + 1;
1009 info->start_brk = info->brk = elf_brk;
1010 info->end_code = end_code;
1011 info->start_code = start_code;
1012 info->end_data = end_data;
1013 info->start_stack = bprm->p;
1015 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1017 set_brk(elf_bss, elf_brk);
1022 printf("(start_brk) %x\n" , info->start_brk);
1023 printf("(end_code) %x\n" , info->end_code);
1024 printf("(start_code) %x\n" , info->start_code);
1025 printf("(end_data) %x\n" , info->end_data);
1026 printf("(start_stack) %x\n" , info->start_stack);
1027 printf("(brk) %x\n" , info->brk);
1030 if ( info->personality == PER_SVR4 )
1032 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1033 and some applications "depend" upon this behavior.
1034 Since we do not have the power to recompile these, we
1035 emulate the SVr4 behavior. Sigh. */
1036 mapped_addr = mmap4k(NULL, ALPHA_PAGE_SIZE, PROT_READ | PROT_EXEC,
1037 MAP_FIXED | MAP_PRIVATE, -1, 0);
1040 #ifdef ELF_PLAT_INIT
1042 * The ABI may specify that certain registers be set up in special
1043 * ways (on i386 %edx is the address of a DT_FINI function, for
1044 * example. This macro performs whatever initialization to
1045 * the regs structure is required.
1047 ELF_PLAT_INIT(regs);
1051 info->entry = elf_entry;
/* Public entry point: open filename, fill in a linux_binprm (arg and
 * env strings packed top-down into argument pages), run the loader,
 * and on success seed the target registers (esp/eip) from the image
 * info.  On failure the argument pages are released. */
1058 int elf_exec(const char * filename, char ** argv, char ** envp,
1059 struct target_pt_regs * regs, struct image_info *infop)
1061 struct linux_binprm bprm;
/* Start packing strings from the very top of the argument area. */
1065 bprm.p = X86_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
1066 for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */
1068 retval = open(filename, O_RDONLY);
1072 /* return retval; */
1077 bprm.filename = (char *)filename;
1082 bprm.argc = count(argv);
1083 bprm.envc = count(envp);
1085 retval = prepare_binprm(&bprm);
/* Order matters: filename, then env, then args (stack grows down). */
1088 bprm.p = copy_strings(1, &bprm.filename, bprm.page, bprm.p);
1090 bprm.p = copy_strings(bprm.envc,envp,bprm.page,bprm.p);
1091 bprm.p = copy_strings(bprm.argc,argv,bprm.page,bprm.p);
1098 retval = load_elf_binary(&bprm,regs,infop);
1101 /* success. Initialize important registers */
1102 regs->esp = infop->start_stack;
1103 regs->eip = infop->entry;
1107 /* Something went wrong, return the inode and free the argument pages*/
1108 for (i=0 ; i<MAX_ARG_PAGES ; i++) {
1109 free_page((void *)bprm.page[i]);
/* Stub: a.out interpreter loading is not implemented; callers treat
 * the (elided) return value as a load failure. */
1115 static int load_aout_interp(void * exptr, int interp_fd)
1117 printf("a.out interpreter not yet supported\n");