/* minix/servers/vfs/exec.c */
/* This file handles the EXEC system call. It performs the work as follows:
* - see if the permissions allow the file to be executed
* - read the header and extract the sizes
* - fetch the initial args and environment from the user space
* - allocate the memory for the new process
* - copy the initial stack from PM to the process
* - read in the text and data segments and copy to the process
* - take care of setuid and setgid bits
* - fix up 'mproc' table
* - tell kernel about EXEC
* - save offset to initial argc (for ps)
*
* The entry points into this file are:
* pm_exec: perform the EXEC system call
*/
#include "fs.h"
#include <sys/stat.h>
#include <sys/mman.h>
#include <minix/callnr.h>
#include <minix/endpoint.h>
#include <minix/com.h>
#include <minix/u64.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/dirent.h>
#include <sys/exec.h>
#include <sys/param.h>
#include "path.h"
#include "vnode.h"
#include "file.h"
#include <minix/vfsif.h>
#include <machine/vmparam.h>
#include <assert.h>
#include <fcntl.h>
#define _KERNEL /* for ELF_AUX_ENTRIES */
#include <libexec.h>
/* fields only used by elf and in VFS */
struct vfs_exec_info {
struct exec_info args; /* libexec exec args */
struct vnode *vp; /* Exec file's vnode */
struct vmnt *vmp; /* Exec file's vmnt */
struct stat sb; /* Exec file's stat structure */
int userflags; /* exec() flags from userland */
int is_dyn; /* Dynamically linked executable */
int elf_main_fd; /* Dyn: FD of main program executable */
char execname[PATH_MAX]; /* Full executable invocation */
int vmfd; /* FD opened for VM to mmap() the executable */
int vmfd_used; /* Set if VM actually used the FD */
};
static int patch_stack(struct vnode *vp, char stack[ARG_MAX],
size_t *stk_bytes, char path[PATH_MAX], vir_bytes *vsp);
static int is_script(struct vfs_exec_info *execi);
static int insert_arg(char stack[ARG_MAX], size_t *stk_bytes, char *arg,
vir_bytes *vsp, char replace);
static void clo_exec(struct fproc *rfp);
static int stack_prepare_elf(struct vfs_exec_info *execi,
char *curstack, size_t *frame_len, vir_bytes *vsp);
static int map_header(struct vfs_exec_info *execi);
static int read_seg(struct exec_info *execi, off_t off, vir_bytes seg_addr, size_t seg_bytes);
#define PTRSIZE sizeof(char *) /* Size of pointers in argv[] and envp[]. */
/* Array of loaders for different object file formats */
typedef int (*exechook_t)(struct vfs_exec_info *execpackage);
typedef int (*stackhook_t)(struct vfs_exec_info *execi, char *curstack,
size_t *frame_len, vir_bytes *vsp);
struct exec_loaders {
libexec_exec_loadfunc_t load_object; /* load executable into memory */
stackhook_t setup_stack; /* prepare stack before argc and argv push */
};
static const struct exec_loaders exec_loaders[] = {
{ libexec_load_elf, stack_prepare_elf },
{ NULL, NULL }
};
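/* Exec is serialized by locking the VM process: VM already has to be locked
 * for exec calls anyway, so it doubles as the exec lock.
 */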
#define lock_exec() lock_proc(fproc_addr(VM_PROC_NR))
#define unlock_exec() unlock_proc(fproc_addr(VM_PROC_NR))
/*===========================================================================*
* get_read_vp *
*===========================================================================*/
static int get_read_vp(struct vfs_exec_info *execi,
char *fullpath, int copyprogname, int sugid, struct lookup *resolve, struct fproc *fp)
{
/* Switch the executable we want to exec() to the binary pointed to by
 * 'fullpath'. This function fills in the necessary details in the execi
 * structure, such as the opened vnode, and unlocks and releases any vnode
 * that was already there. This makes it easy to change the executable
 * during the exec() by calling this function more than once, which is
 * needed when we discover that the executable is actually a script or a
 * dynamically linked executable.
 */
int r;
/* The caller wants to switch vp to the file in 'fullpath';
 * unlock and put the old vnode first if there is one.
 */
if(execi->vp) {
unlock_vnode(execi->vp);
put_vnode(execi->vp);
execi->vp = NULL;
}
/* Remember/overwrite the executable name if requested. */
if(copyprogname) {
char *cp = strrchr(fullpath, '/');
if(cp) cp++;
else cp = fullpath;
strlcpy(execi->args.progname, cp, sizeof(execi->args.progname));
execi->args.progname[sizeof(execi->args.progname)-1] = '\0';
}
/* Open executable */
if ((execi->vp = eat_path(resolve, fp)) == NULL)
return err_code;
unlock_vmnt(execi->vmp);
if (!S_ISREG(execi->vp->v_mode))
return ENOEXEC;
else if ((r = forbidden(fp, execi->vp, X_BIT)) != OK)
return r;
else
r = req_stat(execi->vp->v_fs_e, execi->vp->v_inode_nr,
VFS_PROC_NR, (vir_bytes) &(execi->sb));
if (r != OK) return r;
/* If the caller wants us to, honour setuid/setgid mode bits. */
if (sugid) {
/* Deal with setuid/setgid executables */
if (execi->vp->v_mode & I_SET_UID_BIT) {
execi->args.new_uid = execi->vp->v_uid;
execi->args.allow_setuid = 1;
}
if (execi->vp->v_mode & I_SET_GID_BIT) {
execi->args.new_gid = execi->vp->v_gid;
execi->args.allow_setuid = 1;
}
}
/* Read in first chunk of file. */
if((r=map_header(execi)) != OK)
return r;
return OK;
}
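/* Error handling helpers for pm_exec(): both set 'r' and, on failure, jump to
 * the common cleanup code at the pm_execfinal label.
 */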
#define FAILCHECK(expr) do { if((r=(expr)) != OK) { goto pm_execfinal; } } while(0)
#define Get_read_vp(e,f,p,s,rs,fp) do { \
r=get_read_vp(&e,f,p,s,rs,fp); if(r != OK) { FAILCHECK(r); } \
} while(0)
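/* Callback for the libexec loader: ask VM to file-map part of the executable
 * into the new process, using the file descriptor that was opened for VM.
 */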
static int vfs_memmap(struct exec_info *execi,
vir_bytes vaddr, vir_bytes len, vir_bytes foffset, u16_t clearend,
int protflags)
{
struct vfs_exec_info *vi = (struct vfs_exec_info *) execi->opaque;
struct vnode *vp = ((struct vfs_exec_info *) execi->opaque)->vp;
int r;
u16_t flags = 0;
if(protflags & PROT_WRITE)
flags |= MVM_WRITABLE;
r = minix_vfs_mmap(execi->proc_e, foffset, len,
vp->v_dev, vp->v_inode_nr, vi->vmfd, vaddr, clearend, flags);
if(r == OK) {
vi->vmfd_used = 1;
}
return r;
}
/*===========================================================================*
* pm_exec *
*===========================================================================*/
int pm_exec(vir_bytes path, size_t path_len, vir_bytes frame, size_t frame_len,
vir_bytes *pc, vir_bytes *newsp, vir_bytes *UNUSED(ps_str))
{
/* Perform the execve(name, argv, envp) call. The user library builds a
* complete stack image, including pointers, args, environ, etc. The stack
* is copied to a buffer inside VFS, and then to the new core image.
*
* ps_str is not currently used, but may be used in the future if the
* ps_strings structure has to be moved to another location.
*/
int r;
vir_bytes vsp;
static char mbuf[ARG_MAX]; /* buffer for stack and zeroes */
struct vfs_exec_info execi;
int i;
static char fullpath[PATH_MAX],
elf_interpreter[PATH_MAX],
firstexec[PATH_MAX],
finalexec[PATH_MAX];
struct lookup resolve;
struct fproc *vmfp = fproc_addr(VM_PROC_NR);
stackhook_t makestack = NULL;
struct filp *newfilp = NULL;
lock_exec();
/* unset execi values are 0. */
memset(&execi, 0, sizeof(execi));
execi.vmfd = -1;
/* passed from exec() libc code */
execi.userflags = 0;
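/* The stack of the new process is placed at the top of the user address
 * space, as reported by the kernel, and is limited to the default stack size.
 */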
execi.args.stack_high = kinfo.user_sp;
execi.args.stack_size = DEFAULT_STACK_LIMIT;
fp->text_size = 0;
fp->data_size = 0;
lookup_init(&resolve, fullpath, PATH_NOFLAGS, &execi.vmp, &execi.vp);
resolve.l_vmnt_lock = VMNT_READ;
resolve.l_vnode_lock = VNODE_READ;
/* Fetch the stack from the user before destroying the old core image. */
if (frame_len > ARG_MAX)
FAILCHECK(ENOMEM); /* stack too big */
r = sys_datacopy_wrapper(fp->fp_endpoint, (vir_bytes) frame, SELF, (vir_bytes) mbuf,
(size_t) frame_len);
if (r != OK) { /* can't fetch stack (e.g. bad virtual addr) */
printf("VFS: pm_exec: sys_datacopy failed\n");
FAILCHECK(r);
}
/* Compute the current virtual stack pointer; this has to be done before
* calling patch_stack, which needs it and will adjust it as required. */
vsp = execi.args.stack_high - frame_len;
/* The default is to keep the original user and group IDs */
execi.args.new_uid = fp->fp_effuid;
execi.args.new_gid = fp->fp_effgid;
/* Get the exec file name. */
FAILCHECK(fetch_name(path, path_len, fullpath));
strlcpy(finalexec, fullpath, PATH_MAX);
strlcpy(firstexec, fullpath, PATH_MAX);
/* Get_read_vp will return an opened vnode in execi.
* If necessary it releases the existing vp so we can
* switch after we find out what's inside the file.
* It reads the start of the file.
*/
Get_read_vp(execi, fullpath, 1, 1, &resolve, fp);
/* If this is a script (i.e. has a #!/interpreter line),
* retrieve the name of the interpreter and open that
* executable instead.
*/
if(is_script(&execi)) {
/* patch_stack will add interpreter name and
* args to stack and retrieve the new binary
* name into fullpath.
*/
FAILCHECK(fetch_name(path, path_len, fullpath));
FAILCHECK(patch_stack(execi.vp, mbuf, &frame_len, fullpath, &vsp));
strlcpy(finalexec, fullpath, PATH_MAX);
strlcpy(firstexec, fullpath, PATH_MAX);
Get_read_vp(execi, fullpath, 1, 0, &resolve, fp);
}
/* If this is a dynamically linked executable, retrieve
* the name of that interpreter in elf_interpreter and open that
* executable instead. But open the current executable in an
* fd for the current process.
*/
if(elf_has_interpreter(execi.args.hdr, execi.args.hdr_len,
elf_interpreter, sizeof(elf_interpreter))) {
/* Switch the executable vnode to the interpreter */
execi.is_dyn = 1;
/* The interpreter (loader) needs an fd to the main program,
* which is currently in finalexec
*/
if((r = execi.elf_main_fd = common_open(finalexec, O_RDONLY, 0)) < 0) {
printf("VFS: exec: dynamic: open main exec failed %s (%d)\n",
fullpath, r);
FAILCHECK(r);
}
/* ld.so is linked at 0, but it can relocate itself; we
* want it higher to trap NULL pointer dereferences.
* Let's put it below the stack, and reserve 10MB for ld.so.
*/
execi.args.load_offset =
execi.args.stack_high - execi.args.stack_size - 0xa00000;
/* Remember it */
strlcpy(execi.execname, finalexec, PATH_MAX);
/* The executable we need to execute first (loader)
* is in elf_interpreter, and has to be in fullpath to
* be looked up
*/
strlcpy(fullpath, elf_interpreter, PATH_MAX);
strlcpy(firstexec, elf_interpreter, PATH_MAX);
Get_read_vp(execi, fullpath, 0, 0, &resolve, fp);
}
/* We also want an FD that VM can use to mmap() in the executable, if possible. */
{
struct vnode *vp = execi.vp;
assert(vp);
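/* This is only possible if the underlying FS supports peek requests
 * (RES_HASPEEK) and the executable does not live on a memory device.
 */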
if ((vp->v_vmnt->m_fs_flags & RES_HASPEEK) &&
major(vp->v_dev) != MEMORY_MAJOR) {
int newfd = -1;
if(get_fd(vmfp, 0, R_BIT, &newfd, &newfilp) == OK) {
assert(newfd >= 0 && newfd < OPEN_MAX);
assert(!vmfp->fp_filp[newfd]);
newfilp->filp_count = 1;
newfilp->filp_vno = vp;
newfilp->filp_flags = O_RDONLY;
vmfp->fp_filp[newfd] = newfilp;
/* dup_vnode(vp); */
execi.vmfd = newfd;
execi.args.memmap = vfs_memmap;
}
}
}
/* callback functions and data */
execi.args.copymem = read_seg;
execi.args.clearproc = libexec_clearproc_vm_procctl;
execi.args.clearmem = libexec_clear_sys_memset;
execi.args.allocmem_prealloc_cleared = libexec_alloc_mmap_prealloc_cleared;
execi.args.allocmem_prealloc_junk = libexec_alloc_mmap_prealloc_junk;
execi.args.allocmem_ondemand = libexec_alloc_mmap_ondemand;
execi.args.opaque = &execi;
execi.args.proc_e = fp->fp_endpoint;
execi.args.frame_len = frame_len;
execi.args.filesize = execi.vp->v_size;
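/* Try each object-format loader in turn until one accepts the executable,
 * and remember its stack-setup hook for use below.
 */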
for (i = 0; exec_loaders[i].load_object != NULL; i++) {
r = (*exec_loaders[i].load_object)(&execi.args);
/* Loaded successfully, so no need to try other loaders */
if (r == OK) { makestack = exec_loaders[i].setup_stack; break; }
}
FAILCHECK(r);
/* Inform PM */
FAILCHECK(libexec_pm_newexec(fp->fp_endpoint, &execi.args));
/* Save off PC */
*pc = execi.args.pc;
/* call a stack-setup function if this executable type wants it */
if(makestack) FAILCHECK(makestack(&execi, mbuf, &frame_len, &vsp));
/* Copy the stack from VFS to new core image. */
FAILCHECK(sys_datacopy_wrapper(SELF, (vir_bytes) mbuf, fp->fp_endpoint,
(vir_bytes) vsp, (phys_bytes)frame_len));
/* Return new stack pointer to caller */
*newsp = vsp;
clo_exec(fp);
if (execi.args.allow_setuid) {
/* If after loading the image we're still allowed to run with
* setuid or setgid, change credentials now */
fp->fp_effuid = execi.args.new_uid;
fp->fp_effgid = execi.args.new_gid;
}
/* Remember the new name of the process */
strlcpy(fp->fp_name, execi.args.progname, PROC_NAME_LEN);
fp->text_size = execi.args.text_size;
fp->data_size = execi.args.data_size;
pm_execfinal:
if(newfilp) unlock_filp(newfilp);
else if (execi.vp != NULL) {
unlock_vnode(execi.vp);
put_vnode(execi.vp);
}
if(execi.vmfd >= 0 && !execi.vmfd_used) {
if(OK != close_fd(vmfp, execi.vmfd)) {
printf("VFS: unexpected close fail of vm fd\n");
}
}
unlock_exec();
return(r);
}
/* This is a copy-paste of the same macro in libc/sys-minix/stack_utils.c. Keep it
* synchronized. */
#define STACK_MIN_SZ \
( \
sizeof(int) + sizeof(void *) * 2 + \
sizeof(AuxInfo) * PMEF_AUXVECTORS + PMEF_EXECNAMELEN1 + \
sizeof(struct ps_strings) \
)
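/* For dynamically linked executables, fill in the ELF auxiliary vector in the
 * stack frame built by userland, so that the dynamic linker (ld.so) knows
 * where to find the main program and the other exec parameters.
 */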
static int stack_prepare_elf(struct vfs_exec_info *execi, char *frame, size_t *frame_size,
vir_bytes *vsp)
{
AuxInfo *aux_vec, *aux_vec_end;
vir_bytes vap; /* Address in proc space of the first AuxVec. */
Elf_Ehdr const * const elf_header = (Elf_Ehdr *) execi->args.hdr;
struct ps_strings const * const psp = (struct ps_strings *)
(frame + (*frame_size - sizeof(struct ps_strings)));
size_t const execname_len = strlen(execi->execname);
if (!execi->is_dyn)
return OK;
if (execi->args.hdr_len < sizeof(*elf_header)) {
printf("VFS: malformed ELF headers for exec\n");
return ENOEXEC;
}
if (*frame_size < STACK_MIN_SZ) {
printf("VFS: malformed stack for exec(), smaller than minimum"
" possible size.\n");
return ENOEXEC;
}
/* Find first Aux vector in the stack frame. */
vap = (vir_bytes)(psp->ps_envstr + (psp->ps_nenvstr + 1));
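/* vap is an address in the new process' address space; subtracting the new
 * stack pointer (*vsp) below translates it into an offset within our local
 * copy of the frame.
 */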
aux_vec = (AuxInfo *) (frame + (vap - *vsp));
aux_vec_end = aux_vec + PMEF_AUXVECTORS;
if (((char *)aux_vec < frame) ||
((char *)aux_vec > (frame + *frame_size))) {
printf("VFS: malformed stack for exec(), first AuxVector is"
" not on the stack.\n");
return ENOEXEC;
}
if (((char *)aux_vec_end < frame) ||
((char *)aux_vec_end > (frame + *frame_size))) {
printf("VFS: malformed stack for exec(), last AuxVector is"
" not on the stack.\n");
return ENOEXEC;
}
/* Userland provides a fully filled stack frame, with argc, argv, envp
* and then all the argv and envp strings; consistent with ELF ABI,
* except for a list of Aux vectors that should be between the envp pointers
* and the start of the strings.
*
* It would take some very unpleasant hackery to insert the aux vectors
* before the strings, and correct all the pointers, so the exec code
* in libc makes space for us.
*/
#define AUXINFO(a, type, value) \
do { \
if (a < aux_vec_end) { \
a->a_type = type; \
a->a_v = value; \
a++; \
} else { \
printf("VFS: No more room for ELF AuxVec type %d, skipping it for %s\n", type, execi->execname); \
(aux_vec_end - 1)->a_type = AT_NULL; \
(aux_vec_end - 1)->a_v = 0; \
} \
} while(0)
AUXINFO(aux_vec, AT_BASE, execi->args.load_base);
AUXINFO(aux_vec, AT_ENTRY, execi->args.pc);
AUXINFO(aux_vec, AT_EXECFD, execi->elf_main_fd);
#if 0
AUXINFO(aux_vec, AT_PHDR, XXX ); /* should be &phdr[0] */
AUXINFO(aux_vec, AT_PHENT, elf_header->e_phentsize);
AUXINFO(aux_vec, AT_PHNUM, elf_header->e_phnum);
AUXINFO(aux_vec, AT_RUID, XXX);
AUXINFO(aux_vec, AT_RGID, XXX);
#endif
AUXINFO(aux_vec, AT_EUID, execi->args.new_uid);
AUXINFO(aux_vec, AT_EGID, execi->args.new_gid);
AUXINFO(aux_vec, AT_PAGESZ, PAGE_SIZE);
if(execname_len < PMEF_EXECNAMELEN1) {
char *spacestart;
vir_bytes userp;
/* Empty space starts after aux_vec table; we can put the name
* here. */
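/* Skip two entries' worth of space, leaving room for the AT_SUN_EXECNAME
 * and terminating AT_NULL entries that are added below.
 */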
spacestart = (char *) aux_vec + 2 * sizeof(AuxInfo);
strlcpy(spacestart, execi->execname, PMEF_EXECNAMELEN1);
memset(spacestart + execname_len, '\0',
PMEF_EXECNAMELEN1 - execname_len);
/* Compute the address the string will have in the user's address space. */
userp = *vsp + (spacestart - frame);
/* Move back to where the AT_NULL is */
AUXINFO(aux_vec, AT_SUN_EXECNAME, userp);
}
/* Always terminate with AT_NULL */
AUXINFO(aux_vec, AT_NULL, 0);
return OK;
}
/*===========================================================================*
* is_script *
*===========================================================================*/
static int is_script(struct vfs_exec_info *execi)
2006-05-11 16:57:23 +02:00
{
/* Is this an interpreted script, i.e. does it start with "#!"? */
assert(execi->args.hdr != NULL);
return(execi->args.hdr_len >= 2 && execi->args.hdr[0] == '#'
&& execi->args.hdr[1] == '!');
}
/*===========================================================================*
* patch_stack *
*===========================================================================*/
static int patch_stack(
struct vnode *vp, /* pointer for open script file */
char stack[ARG_MAX], /* pointer to stack image within VFS */
size_t *stk_bytes, /* size of initial stack */
char path[PATH_MAX], /* path to script file */
vir_bytes *vsp /* virtual stack pointer in the new process */
)
{
/* Patch the argument vector to include the path name of the script to be
* interpreted, and all strings on the #! line. Returns the path name of
* the interpreter.
*/
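/* For example, if "/some/script" starts with "#!/bin/sh -x" and was invoked
 * as {"/some/script", "arg"}, the resulting argument vector becomes
 * {"/bin/sh", "-x", "/some/script", "arg"}, and "/bin/sh" is returned in
 * path[].
 */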
enum { INSERT=FALSE, REPLACE=TRUE };
int n, r;
off_t pos, new_pos;
char *sp, *interp = NULL;
unsigned int cum_io;
char buf[PAGE_SIZE];
/* Make 'path' the new argv[0]. */
if (!insert_arg(stack, stk_bytes, path, vsp, REPLACE)) return(ENOMEM);
pos = 0; /* Read from the start of the file */
/* Issue request */
r = req_readwrite(vp->v_fs_e, vp->v_inode_nr, pos, READING, VFS_PROC_NR,
(vir_bytes) buf, sizeof(buf), &new_pos, &cum_io);
if (r != OK) return(r);
n = vp->v_size;
if (n > sizeof(buf))
n = sizeof(buf);
if (n < 2) return ENOEXEC;
sp = &(buf[2]); /* just behind the #! */
n -= 2;
if (n > PATH_MAX) n = PATH_MAX;
/* Use the 'path' variable for temporary storage */
memcpy(path, sp, n);
if ((sp = memchr(path, '\n', n)) == NULL) /* must be a proper line */
2006-05-11 16:57:23 +02:00
return(ENOEXEC);
/* Move sp backwards through the interpreter line (now in path[]), prepending
 * each argument to the stack. */
for (;;) {
/* skip spaces behind argument. */
while (sp > path && (*--sp == ' ' || *sp == '\t')) {}
if (sp == path) break;
sp[1] = 0;
/* Move to the start of the argument. */
while (sp > path && sp[-1] != ' ' && sp[-1] != '\t') --sp;
interp = sp;
if (!insert_arg(stack, stk_bytes, sp, vsp, INSERT)) {
printf("VFS: patch_stack: insert_arg failed\n");
return(ENOMEM);
}
}
if(!interp)
return ENOEXEC;
if (interp != path)
memmove(path, interp, strlen(interp)+1);
return(OK);
}
/*===========================================================================*
* insert_arg *
*===========================================================================*/
static int insert_arg(char stack[ARG_MAX], size_t *stk_bytes, char *arg,
vir_bytes *vsp, char replace)
{
/* Patch the stack so that arg will become argv[0]. Be careful, the
* stack may be filled with garbage, although it normally looks like
* this:
* nargs argv[0] ... argv[nargs-1] NULL envp[0] ... NULL
* followed by the strings "pointed" to by the argv[i] and the envp[i].
* The pointers are in the new process' address space.
*
* Return true iff the operation succeeded.
*/
struct ps_strings *psp;
int offset;
size_t old_bytes = *stk_bytes;
int const arg_len = strlen(arg) + 1;
/* Offset to argv[0][0] in the stack frame. */
int const a0 = (int)(((char **)stack)[1] - *vsp);
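/* (((char **) stack)[1] is argv[0] as seen by the new process; subtracting
 * the new stack pointer *vsp yields its offset within our local copy of the
 * frame.)
 */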
/* Check that argv[0] points within the stack frame. */
if ((a0 < 0) || (a0 >= old_bytes)) {
printf("vfs:: argv[0][] not within stack range!! %i\n", a0);
return FALSE;
}
if (!replace) {
/* Prepending arg adds one pointer, one string and a zero byte. */
offset = arg_len + PTRSIZE;
} else {
/* replacing argv[0] with arg adds the difference in length of
* the two strings. Make sure we don't go beyond the stack size
* when computing the length of the current argv[0]. */
offset = arg_len - strnlen(stack + a0, ARG_MAX - a0 - 1);
}
/* As ps_strings follows the strings, ensure the offset is word aligned. */
offset = offset + (PTRSIZE - ((PTRSIZE + offset) % PTRSIZE));
/* The stack will grow (or shrink) by offset bytes. */
if ((*stk_bytes += offset) > ARG_MAX) {
printf("vfs:: offset too big!! %d (max %d)\n", *stk_bytes,
ARG_MAX);
return FALSE;
}
/* Reposition the strings by offset bytes */
memmove(stack + a0 + offset, stack + a0, old_bytes - a0);
/* Put arg in the new space, leaving padding in front of it. */
strlcpy(stack + a0 + offset - arg_len, arg, arg_len);
if (!replace) {
/* Make space for a new argv[0]. */
memmove(stack + 2 * PTRSIZE,
stack + 1 * PTRSIZE, a0 - 2 * PTRSIZE);
((char **) stack)[0]++; /* nargs++; */
}
/* set argv[0] correctly */
((char **) stack)[1] = (char *) a0 - arg_len + *vsp;
/* Update stack pointer in the process address space. */
*vsp -= offset;
/* Update argv and envp in ps_strings */
psp = (struct ps_strings *) (stack + *stk_bytes - sizeof(struct ps_strings));
psp->ps_argvstr -= (offset / PTRSIZE);
if (!replace) {
psp->ps_nargvstr++;
}
psp->ps_envstr = psp->ps_argvstr + psp->ps_nargvstr + 1;
return TRUE;
}
/*===========================================================================*
* read_seg *
*===========================================================================*/
static int read_seg(struct exec_info *execi, off_t off, vir_bytes seg_addr, size_t seg_bytes)
{
/*
 * The byte count on read is usually smaller than the segment size, because
* a segment is padded out to a click multiple, and the data segment is only
* partially initialized.
*/
int r;
off_t new_pos;
unsigned int cum_io;
struct vnode *vp = ((struct vfs_exec_info *) execi->opaque)->vp;
/* Make sure that the file is big enough */
if (off + seg_bytes > LONG_MAX) return(EIO);
if ((unsigned long) vp->v_size < off+seg_bytes) return(EIO);
if ((r = req_readwrite(vp->v_fs_e, vp->v_inode_nr, off, READING,
execi->proc_e, (vir_bytes) seg_addr, seg_bytes,
&new_pos, &cum_io)) != OK) {
printf("VFS: read_seg: req_readwrite failed (data)\n");
return(r);
}
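  /* A short read is only reported below; it is not treated as an error. */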
if (r == OK && cum_io != seg_bytes)
printf("VFS: read_seg segment has not been read properly\n");
return(r);
}
/*===========================================================================*
* clo_exec *
*===========================================================================*/
static void clo_exec(struct fproc *rfp)
{
/* Files can be marked with the FD_CLOEXEC bit (in fp->fp_cloexec).
*/
int i;
  /* Check the file descriptors one by one for presence of FD_CLOEXEC. */
for (i = 0; i < OPEN_MAX; i++)
if ( FD_ISSET(i, &rfp->fp_cloexec_set))
(void) close_fd(rfp, i);
}
/*===========================================================================*
* map_header *
*===========================================================================*/
static int map_header(struct vfs_exec_info *execi)
{
int r;
unsigned int cum_io;
off_t pos, new_pos;
static char hdr[PAGE_SIZE]; /* Assume that header is not larger than a page */
pos = 0; /* Read from the start of the file */
/* How much is sensible to read */
execi->args.hdr_len = MIN(execi->vp->v_size, sizeof(hdr));
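  /* Clamping to the file size prevents reading past the end of a very short
   * file; a header larger than one page is truncated to sizeof(hdr).
   */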
execi->args.hdr = hdr;
r = req_readwrite(execi->vp->v_fs_e, execi->vp->v_inode_nr,
pos, READING, VFS_PROC_NR, (vir_bytes) hdr,
execi->args.hdr_len, &new_pos, &cum_io);
if (r != OK) {
printf("VFS: exec: map_header: req_readwrite failed\n");
return(r);
}
return(OK);
}