libmthread: add guard pages to stacks
Add guard pages to the top of the stack to catch overflow errors. Moreover, fix a bug where libmthread would keep using a stack that had just been deallocated (a detached thread would deallocate its own stack after it finished running).
parent 0949f5b342
commit accaae9b2c
4 changed files with 80 additions and 12 deletions
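The core idea, in isolation: over-allocate the stack, then remove its lowest page so that a downward-growing stack faults the moment it overflows instead of silently corrupting a neighboring allocation. A minimal sketch of this technique, written against plain POSIX mmap()/munmap() rather than MINIX's minix_mmap() wrappers (alloc_guarded_stack() is an illustrative name, not libmthread API):

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void *alloc_guarded_stack(size_t stacksize)
{
	size_t pagesz = (size_t) sysconf(_SC_PAGESIZE);
	char *base;

	/* Reserve the requested stack plus one extra page for the guard. */
	base = mmap(NULL, stacksize + pagesz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED)
		return NULL;

	/* The stack grows downward, so the guard belongs at the lowest
	 * address. Unmapping that page (as this commit does) makes an
	 * overflowing access fault immediately.
	 */
	if (munmap(base, pagesz) != 0)
		return NULL;

	return base + pagesz;	/* usable range: [base+pagesz, base+pagesz+stacksize) */
}

An mprotect(PROT_NONE) guard would work as well and would keep the region reserved; since the commit unmaps the guard page instead, the later cleanup only has to munmap ss_size bytes starting at ss_sp, which is exactly what the new mthread_thread_reset() code below does.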
lib/libmthread/allocate.c

@@ -2,6 +2,8 @@
 #include <errno.h>
 #include <minix/mthread.h>
 #include <string.h>
+#include <machine/param.h>
+#include <sys/mman.h>
 #include "global.h"
 #include "proto.h"
 
@@ -11,11 +13,14 @@ FORWARD _PROTOTYPE( void mthread_thread_init, (mthread_thread_t thread,
 					       void *(*proc)(void *),
 					       void *arg)		);
 
-FORWARD _PROTOTYPE( void mthread_thread_reset, (mthread_thread_t thread));
 FORWARD _PROTOTYPE( void mthread_thread_stop, (mthread_thread_t thread));
 FORWARD _PROTOTYPE( void mthread_trampoline, (void)			);
 
 PRIVATE int initialized = 0;
+#ifndef PGSHIFT
+# define PGSHIFT 12	/* XXX: temporarily for ACK */
+#endif
+#define MTHREAD_GUARDSIZE (1 << PGSHIFT)	/* 1 page */
 
 PRIVATE struct __mthread_attr default_attr = { MTHREAD_STACK_MIN,
 					       NULL,
@@ -229,6 +234,7 @@ PUBLIC void mthread_init(void)
   if (!initialized) {
 	no_threads = 0;
 	used_threads = 0;
+	need_reset = 0;
 	running_main_thread = 1;/* mthread_init can only be called from the
 				 * main thread. Calling it from a thread will
 				 * not enter this clause.
@@ -385,14 +391,51 @@ void *arg;
   stacksize = tcb->m_attr.ma_stacksize;
   stackaddr = tcb->m_attr.ma_stackaddr;
 
-  if (stacksize == (size_t) 0)
+  if (stacksize == (size_t) 0) {
+	/* User provided too small a stack size. Forget about that stack and
+	 * allocate a new one ourselves.
+	 */
 	stacksize = (size_t) MTHREAD_STACK_MIN;
+	tcb->m_attr.ma_stackaddr = stackaddr = NULL;
+  }
 
   if (stackaddr == NULL) {
 	/* Allocate stack space */
-	tcb->m_context.uc_stack.ss_sp = malloc(stacksize);
-	if (tcb->m_context.uc_stack.ss_sp == NULL)
+	size_t guarded_stacksize;
+	char *guard_start, *guard_end;
+
+	stacksize = round_page(stacksize + MTHREAD_GUARDSIZE);
+	stackaddr = minix_mmap(NULL, stacksize,
+			       PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
+			       -1, 0);
+	if (stackaddr == NULL)
 		mthread_panic("Failed to allocate stack to thread");
+
+#if (_MINIX_CHIP == _CHIP_INTEL)
+	guard_start = stackaddr;
+	guard_end = stackaddr + MTHREAD_GUARDSIZE;
+	guarded_stacksize = stackaddr + stacksize - guard_end;
+
+	/* The stack will be used from (stackaddr+stacksize) to stackaddr. That
+	 * is, growing downwards. So the "top" of the stack may not grow into
+	 * stackaddr+MTHREAD_GUARDSIZE.
+	 *
+	 * +-------+ stackaddr + stacksize
+	 * |       |
+	 * |   |   |
+	 * |  \|/  |
+	 * |       |
+	 * +-------+ stackaddr + MTHREAD_GUARDSIZE
+	 * | GUARD |
+	 * +-------+ stackaddr
+	 */
+#else
+# error "Unsupported platform"
+#endif
+	stacksize = guarded_stacksize;
+	if (minix_munmap(guard_start, MTHREAD_GUARDSIZE) != 0)
+		mthread_panic("unable to unmap stack space for guard");
+	tcb->m_context.uc_stack.ss_sp = guard_end;
   } else
 	tcb->m_context.uc_stack.ss_sp = stackaddr;
 
@@ -406,7 +449,7 @@ void *arg;
 /*===========================================================================*
  *				mthread_thread_reset			     *
  *===========================================================================*/
-PRIVATE void mthread_thread_reset(thread)
+PUBLIC void mthread_thread_reset(thread)
 mthread_thread_t thread;
 {
 /* Reset the thread to its default values. Free the allocated stack space. */
@@ -423,8 +466,12 @@ mthread_thread_t thread;
   rt->m_result = NULL;
   rt->m_cond = NULL;
   if (rt->m_attr.ma_stackaddr == NULL) {	/* We allocated stack space */
-	if (rt->m_context.uc_stack.ss_sp)
-		free(rt->m_context.uc_stack.ss_sp);	/* Free allocated stack */
+	if (rt->m_context.uc_stack.ss_sp) {
+		if (minix_munmap(rt->m_context.uc_stack.ss_sp,
+				 rt->m_context.uc_stack.ss_size) != 0) {
+			mthread_panic("unable to unmap memory");
+		}
+	}
 	rt->m_context.uc_stack.ss_sp = NULL;
   }
   rt->m_context.uc_stack.ss_size = 0;
@@ -450,14 +497,19 @@ mthread_thread_t thread;
 	return;
   }
 
-  mthread_thread_reset(thread);
-
   if (mthread_cond_destroy(&(stop_thread->m_exited)) != 0)
 	mthread_panic("Could not destroy condition at thread deallocation\n");
 
-  used_threads--;
-  mthread_queue_add(&free_threads, thread);
+  /* Can't deallocate ourselves (i.e., we're a detached thread) */
+  if (thread == current_thread) {
+	stop_thread->m_state = MS_NEEDRESET;
+	need_reset++;
+  } else {
+	mthread_thread_reset(thread);
+	used_threads--;
+	mthread_queue_add(&free_threads, thread);
+  }
 }
 
 
 /*===========================================================================*
lib/libmthread/global.h

@@ -16,7 +16,7 @@
 #define MTHREAD_NOT_INUSE 0xdefec7
 
 typedef enum {
-  MS_CONDITION, MS_DEAD, MS_EXITING, MS_MUTEX, MS_RUNNABLE
+  MS_CONDITION, MS_DEAD, MS_EXITING, MS_MUTEX, MS_RUNNABLE, MS_NEEDRESET
 } mthread_state_t;
 
 struct __mthread_tcb {
@@ -43,5 +43,6 @@ EXTERN mthread_tcb_t **threads;
 EXTERN mthread_tcb_t mainthread;
 EXTERN int no_threads;
 EXTERN int used_threads;
+EXTERN int need_reset;
 EXTERN int running_main_thread;
 
lib/libmthread/proto.h

@@ -3,6 +3,7 @@
 
 /* allocate.c */
 _PROTOTYPE( mthread_tcb_t * mthread_find_tcb, (mthread_thread_t thread) );
+_PROTOTYPE( void mthread_thread_reset, (mthread_thread_t thread)	);
 
 /* attribute.c */
 _PROTOTYPE( void mthread_init_valid_attributes, (void)			);
lib/libmthread/scheduler.c

@@ -151,9 +151,23 @@ mthread_thread_t thread; /* Thread to make runnable */
 PUBLIC int mthread_yield(void)
 {
 /* Defer further execution of the current thread and let another thread run. */
+  mthread_tcb_t *tcb;
+  mthread_thread_t t;
 
   mthread_init();	/* Make sure libmthread is initialized */
 
+  /* Detached threads cannot clean themselves up. This is a perfect moment to
+   * do it */
+  for (t = (mthread_thread_t) 0; need_reset > 0 && t < no_threads; t++) {
+	tcb = mthread_find_tcb(t);
+	if (tcb->m_state == MS_NEEDRESET) {
+		mthread_thread_reset(t);
+		used_threads--;
+		need_reset--;
+		mthread_queue_add(&free_threads, t);
+	}
+  }
+
   if (mthread_queue_isempty(&run_queue)) {	/* No point in yielding. */
 	return(-1);
   } else if (current_thread == NO_THREAD) {
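The second half of the fix is the deferred teardown visible across allocate.c and scheduler.c above: a detached thread that has finished cannot munmap the stack it is still executing on, so mthread_thread_stop() only marks it MS_NEEDRESET, and mthread_yield() reclaims it later from another thread's context. The same pattern in isolation, with hypothetical names and a heap-allocated stack for brevity (this is not the libmthread API):

#include <stdlib.h>

enum state { TS_RUNNING, TS_NEEDRESET, TS_FREE };

struct thread {
	enum state st;
	void *stack;
};

/* Runs on the dying thread's own stack: defer, don't free. */
static void stop_self(struct thread *self)
{
	/* Freeing self->stack here would release the memory we are
	 * currently executing on; just flag it for later. */
	self->st = TS_NEEDRESET;
}

/* Runs later, on some other thread's stack (e.g. at a yield point). */
static void reap_threads(struct thread *pool, int n)
{
	for (int i = 0; i < n; i++) {
		if (pool[i].st == TS_NEEDRESET) {
			free(pool[i].stack);	/* now safe: nobody runs on it */
			pool[i].stack = NULL;
			pool[i].st = TS_FREE;
		}
	}
}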