No locking in kernel code

- No locking in RTS_(UN)SET macros

- No lock_notify()

- Removed unused lock_send()

- No lock/unlock macros anymore
Tomas Hruby 2010-02-09 15:26:58 +00:00
parent 064cb7583a
commit c6fec6866f
24 changed files with 38 additions and 161 deletions
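The change is mechanical throughout: the kernel now runs with interrupts disabled from the first kernel entry, so individual call sites no longer take and release the interrupt "lock" themselves. Below is a standalone model of the before/after shape; every name and body in it is a mock stand-in for illustration, not the kernel's real definition.

#include <assert.h>

static int intr_on = 0;                        /* mock CPU interrupt flag */
static int intr_disabled(void) { return !intr_on; }

/* Old shape: lock on demand, remember whether we locked, unlock after. */
#define RTS_LOCK_SET_OLD(flags, f) \
	do { \
		int u = 0; \
		if (!intr_disabled()) { intr_on = 0; u = 1; } /* "lock" */ \
		(flags) |= (f); \
		if (u) { intr_on = 1; } /* "unlock" */ \
	} while (0)

/* New shape: assert the invariant instead of establishing it. */
#define RTS_SET_NEW(flags, f) \
	do { \
		assert(intr_disabled()); \
		(flags) |= (f); \
	} while (0)

int main(void)
{
	int p_rts_flags = 0;
	RTS_SET_NEW(p_rts_flags, 0x01);	/* interrupts are "off" at entry */
	return p_rts_flags == 0x01 ? 0 : 1;
}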

View file

@@ -35,7 +35,7 @@ struct proc *p;
 p->p_seg.p_cr3 = 0;
 p->p_misc_flags &= ~MF_FULLVM;
 }
-RTS_LOCK_UNSET(p, RTS_VMINHIBIT);
+RTS_UNSET(p, RTS_VMINHIBIT);
 return OK;
 case VMCTL_INCSP:
 /* Increase process SP. */

View file

@@ -34,9 +34,7 @@ PUBLIC int do_int86(struct proc * caller, message * m_ptr)
 * Figuring out the exact source is too complicated. CLOCK_IRQ is normally
 * not very random.
 */
-lock;
 get_randomness(&krandom, CLOCK_IRQ);
-unlock;
 return(OK);
 }

View file

@@ -78,7 +78,7 @@ void pagefault( struct proc *pr,
 /* Don't schedule this process until pagefault is handled. */
 vmassert(pr->p_seg.p_cr3 == read_cr3());
 vmassert(!RTS_ISSET(pr, RTS_PAGEFAULT));
-RTS_LOCK_SET(pr, RTS_PAGEFAULT);
+RTS_SET(pr, RTS_PAGEFAULT);
 /* Save pagefault details, suspend process,
 * add process to pagefault chain,

View file

@@ -569,7 +569,7 @@ PRIVATE void vm_suspend(struct proc *caller, struct proc *target,
 vmassert(!RTS_ISSET(caller, RTS_VMREQUEST));
 vmassert(!RTS_ISSET(target, RTS_VMREQUEST));
-RTS_LOCK_SET(caller, RTS_VMREQUEST);
+RTS_SET(caller, RTS_VMREQUEST);
 #if DEBUG_VMASSERT
 caller->p_vmrequest.stacktrace[0] = '\0';
@@ -837,7 +837,7 @@ int vmcheck; /* if nonzero, can return VMSUSPEND */
 if(caller && RTS_ISSET(caller, RTS_VMREQUEST)) {
 vmassert(caller->p_vmrequest.vmresult != VMSUSPEND);
-RTS_LOCK_UNSET(caller, RTS_VMREQUEST);
+RTS_UNSET(caller, RTS_VMREQUEST);
 if(caller->p_vmrequest.vmresult != OK) {
 #if DEBUG_VMASSERT
 printf("virtual_copy: returning VM error %d\n",

View file

@@ -277,11 +277,7 @@ PRIVATE void ser_dump_segs(void)
 PRIVATE void ser_debug(int c)
 {
-int u = 0;
 serial_debug_active = 1;
-/* Disable interrupts so that we get a consistent state. */
-if(!intr_disabled()) { lock; u = 1; };
 switch(c)
 {
@@ -314,7 +310,6 @@ PRIVATE void ser_debug(int c)
 #endif
 }
 serial_debug_active = 0;
-if(u) { unlock; }
 }
 PRIVATE void printslot(struct proc *pp, int level)

View file

@@ -25,16 +25,6 @@
 #define unset_sys_bit(map,bit) \
 ( MAP_CHUNK(map.chunk,bit) &= ~(1 << CHUNK_OFFSET(bit)) )
-#define reallock
-#define realunlock
-/* Disable/enable hardware interrupts. The parameters of lock() and unlock()
- * are used when debugging is enabled. See debug.h for more information.
- */
-#define lock reallock
-#define unlock realunlock
 #ifdef CONFIG_IDLE_TSC
 #define IDLE_STOP if(idle_active) { read_tsc_64(&idle_stop); idle_active = 0; }
 #else
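For context, the removed lock/unlock pair amounts to uniprocessor interrupt masking. A minimal sketch of that idea, assuming x86 and GCC-style inline assembly; these two helpers are illustrative stand-ins, not the macros this header actually defined:

/* Illustrative only: "locking" by masking hardware interrupts. */
static inline void intr_lock(void)   { __asm__ volatile ("cli" ::: "memory"); }
static inline void intr_unlock(void) { __asm__ volatile ("sti" ::: "memory"); }

The premise of the commit is that the kernel already enters with interrupts masked, so wrapping individual operations like this adds nothing and hides the real invariant.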

View file

@@ -46,8 +46,6 @@
 #define NOREC_ENTER(varname) \
 static int varname = NOTENTERED; \
-int mustunlock = 0; \
-if(!intr_disabled()) { lock; mustunlock = 1; } \
 vmassert(varname == ENTERED || varname == NOTENTERED); \
 vmassert(magictest == MAGICTEST); \
 vmassert(varname != ENTERED); \
@@ -58,7 +56,6 @@
 vmassert(magictest == MAGICTEST); \
 vmassert(varname == ENTERED || varname == NOTENTERED); \
 varname = NOTENTERED; \
-if(mustunlock) { unlock; } \
 return v; \
 } while(0)

View file

@@ -4,10 +4,6 @@
 *
 * sys_call: a system call, i.e., the kernel is trapped with an INT
 *
-* As well as several entry points used from the interrupt and task level:
-*
-* lock_send: send a message to a process
-*
 * Changes:
 * Aug 19, 2005 rewrote scheduling code (Jorrit N. Herder)
 * Jul 25, 2005 rewrote system call handling (Jorrit N. Herder)
@@ -45,10 +41,7 @@
 #include "proc.h"
 #include "vm.h"
-/* Scheduling and message passing functions. The functions are available to
- * other parts of the kernel through lock_...(). The lock temporarily disables
- * interrupts to prevent race conditions.
- */
+/* Scheduling and message passing functions */
 FORWARD _PROTOTYPE( void idle, (void));
 FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst_e,
 message *m_ptr, int flags));
@@ -1168,38 +1161,6 @@ PRIVATE int try_one(struct proc *src_ptr, struct proc *dst_ptr, int *postponed)
 return EAGAIN;
 }
-/*===========================================================================*
- * lock_notify *
- *===========================================================================*/
-PUBLIC int lock_notify(src_e, dst_e)
-int src_e; /* (endpoint) sender of the notification */
-int dst_e; /* (endpoint) who is to be notified */
-{
-/* Safe gateway to mini_notify() for tasks and interrupt handlers. The sender
- * is explicitly given to prevent confusion about where the call comes from. The
- * MINIX kernel is not reentrant, which means that interrupts are disabled after
- * the first kernel entry (hardware interrupt, trap, or exception). Locking
- * is done by temporarily disabling interrupts.
- */
-int result, src_p;
-vmassert(!intr_disabled());
-if (!isokendpt(src_e, &src_p)) {
-kprintf("lock_notify: bogus src: %d\n", src_e);
-return EDEADSRCDST;
-}
-lock;
-vmassert(intr_disabled());
-result = mini_notify(proc_addr(src_p), dst_e);
-vmassert(intr_disabled());
-unlock;
-vmassert(!intr_disabled());
-return(result);
-}
 /*===========================================================================*
  * enqueue *
  *===========================================================================*/
@@ -1445,7 +1406,6 @@ timer_t *tp; /* watchdog timer pointer */
 vmassert(!intr_disabled());
-lock;
 for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
 if (! isemptyp(rp)) { /* check slot use */
 if (rp->p_priority > rp->p_max_priority) { /* update priority? */
@@ -1460,7 +1420,6 @@ timer_t *tp; /* watchdog timer pointer */
 }
 }
 }
-unlock;
 /* Now schedule a new watchdog timer to balance the queues again. The
 * period depends on the total amount of quantum ticks added.
@@ -1469,21 +1428,6 @@ timer_t *tp; /* watchdog timer pointer */
 set_timer(&queue_timer, get_uptime() + next_period, balance_queues);
 }
-/*===========================================================================*
- * lock_send *
- *===========================================================================*/
-PUBLIC int lock_send(dst_e, m_ptr)
-int dst_e; /* to whom is message being sent? */
-message *m_ptr; /* pointer to message buffer */
-{
-/* Safe gateway to mini_send() for tasks. */
-int result;
-lock;
-result = mini_send(proc_ptr, dst_e, m_ptr, 0);
-unlock;
-return(result);
-}
 /*===========================================================================*
  * endpoint_lookup *
  *===========================================================================*/
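With lock_notify() gone, each call site switches to mini_notify(proc_addr(src), dst) and inherits the obligation that interrupts are already disabled. A tiny compilable mock of the new call shape; the types and bodies are stand-ins, not kernel code:

typedef int endpoint_t;
struct proc { endpoint_t p_endpoint; };

static struct proc procs[2];
static struct proc *proc_addr(int n) { return &procs[n]; }

/* Stand-in for mini_notify(): the source is a proc pointer, not an
 * endpoint, and interrupts are assumed to be disabled already. */
static int mini_notify(const struct proc *src, endpoint_t dst)
{
	(void)src;
	(void)dst;
	return 0; /* OK */
}

int main(void)
{
	/* old: lock_notify(SYSTEM, dst);  new: */
	return mini_notify(proc_addr(0), /* dst */ 1);
}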

View file

@@ -182,38 +182,11 @@ struct proc {
 vmassert(intr_disabled()); \
 } while(0)
-/* Set flag and dequeue if the process was runnable. */
-#define RTS_LOCK_SET(rp, f) \
-do { \
-int u = 0; \
-if(!intr_disabled()) { u = 1; lock; } \
-if(proc_is_runnable(rp)) { dequeue(rp); } \
-(rp)->p_rts_flags |= (f); \
-if(u) { unlock; } \
-} while(0)
-/* Clear flag and enqueue if the process was not runnable but is now. */
-#define RTS_LOCK_UNSET(rp, f) \
-do { \
-int rts; \
-int u = 0; \
-if(!intr_disabled()) { u = 1; lock; } \
-rts = (rp)->p_rts_flags; \
-(rp)->p_rts_flags &= ~(f); \
-if(!rts_f_is_runnable(rts) && proc_is_runnable(rp)) { \
-enqueue(rp); \
-} \
-if(u) { unlock; } \
-} while(0)
 /* Set flags to this value. */
-#define RTS_LOCK_SETFLAGS(rp, f) \
+#define RTS_SETFLAGS(rp, f) \
 do { \
-int u = 0; \
-if(!intr_disabled()) { u = 1; lock; } \
 if(proc_is_runnable(rp) && (f)) { dequeue(rp); } \
 (rp)->p_rts_flags = (f); \
-if(u) { unlock; } \
 } while(0)
 /* Misc flags */
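The surviving RTS_SET and RTS_UNSET are defined just above this hunk; only their tails are visible as context. Subtracting the locking from the removed bodies suggests they now look roughly like this (a sketch reconstructed from the diff, not verbatim kernel source):

#define RTS_SET(rp, f) \
	do { \
		vmassert(intr_disabled()); \
		if (proc_is_runnable(rp)) { dequeue(rp); } \
		(rp)->p_rts_flags |= (f); \
	} while (0)

#define RTS_UNSET(rp, f) \
	do { \
		int rts; \
		vmassert(intr_disabled()); \
		rts = (rp)->p_rts_flags; \
		(rp)->p_rts_flags &= ~(f); \
		if (!rts_f_is_runnable(rts) && proc_is_runnable(rp)) { \
			enqueue(rp); \
		} \
	} while (0)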

View file

@@ -30,9 +30,7 @@ _PROTOTYPE( void minix_panic, (char *s, int n) );
 /* proc.c */
 _PROTOTYPE( int do_ipc, (int call_nr, int src_dst,
 message *m_ptr, long bit_map) );
-_PROTOTYPE( int lock_notify, (int src, int dst) );
 _PROTOTYPE( int mini_notify, (struct proc *src, endpoint_t dst) );
-_PROTOTYPE( int lock_send, (int dst, message *m_ptr) );
 _PROTOTYPE( void enqueue, (struct proc *rp) );
 _PROTOTYPE( void dequeue, (struct proc *rp) );
 _PROTOTYPE( void balance_queues, (struct timer *tp) );

View file

@@ -331,11 +331,7 @@ PUBLIC void send_sig(int proc_nr, int sig_nr)
 rp = proc_addr(proc_nr);
 sigaddset(&priv(rp)->s_sig_pending, sig_nr);
-if(!intr_disabled()) {
-lock_notify(SYSTEM, rp->p_endpoint);
-} else {
-mini_notify(proc_addr(SYSTEM), rp->p_endpoint);
-}
+mini_notify(proc_addr(SYSTEM), rp->p_endpoint);
 }
 /*===========================================================================*
@@ -368,7 +364,7 @@ int sig_nr; /* signal to be sent */
 if (! sigismember(&rp->p_pending, sig_nr)) {
 sigaddset(&rp->p_pending, sig_nr);
 if (! (RTS_ISSET(rp, RTS_SIGNALED))) { /* other pending */
-RTS_LOCK_SET(rp, RTS_SIGNALED | RTS_SIG_PENDING);
+RTS_SET(rp, RTS_SIGNALED | RTS_SIG_PENDING);
 send_sig(PM_PROC_NR, SIGKSIG);
 }
 }
@@ -479,7 +475,7 @@ register struct proc *rc; /* slot of process to clean up */
 }
 /* Make sure that the exiting process is no longer scheduled. */
-RTS_LOCK_SET(rc, RTS_NO_ENDPOINT);
+RTS_SET(rc, RTS_NO_ENDPOINT);
 if (priv(rc)->s_flags & SYS_PROC)
 {
 if (priv(rc)->s_asynsize) {
@@ -529,7 +525,7 @@ register struct proc *rc; /* slot of process to clean up */
 /* Check if process is receiving from exiting process. */
 if (RTS_ISSET(rp, RTS_RECEIVING) && rp->p_getfrom_e == rc->p_endpoint) {
 rp->p_reg.retreg = ESRCDIED; /* report source died */
-RTS_LOCK_UNSET(rp, RTS_RECEIVING); /* no longer receiving */
+RTS_UNSET(rp, RTS_RECEIVING); /* no longer receiving */
 #if DEBUG_ENABLE_IPC_WARNINGS
 kprintf("endpoint %d / %s receiving from dead src ep %d / %s\n",
 rp->p_endpoint, rp->p_name, rc->p_endpoint, rc->p_name);
@@ -538,7 +534,7 @@ register struct proc *rc; /* slot of process to clean up */
 if (RTS_ISSET(rp, RTS_SENDING) &&
 rp->p_sendto_e == rc->p_endpoint) {
 rp->p_reg.retreg = EDSTDIED; /* report destination died */
-RTS_LOCK_UNSET(rp, RTS_SENDING);
+RTS_UNSET(rp, RTS_SENDING);
 #if DEBUG_ENABLE_IPC_WARNINGS
 kprintf("endpoint %d / %s send to dying dst ep %d (%s)\n",
 rp->p_endpoint, rp->p_name, rc->p_endpoint, rc->p_name);

View file

@@ -33,7 +33,7 @@ PUBLIC int do_endksig(struct proc * caller, message * m_ptr)
 /* PM has finished one kernel signal. Perhaps process is ready now? */
 if (!RTS_ISSET(rp, RTS_SIGNALED)) /* new signal arrived */
-RTS_LOCK_UNSET(rp, RTS_SIG_PENDING); /* remove pending flag */
+RTS_UNSET(rp, RTS_SIG_PENDING); /* remove pending flag */
 return(OK);
 }

View file

@@ -41,7 +41,7 @@ PUBLIC int do_exec(struct proc * caller, message * m_ptr)
 arch_pre_exec(rp, (u32_t) m_ptr->PR_IP_PTR, (u32_t) m_ptr->PR_STACK_PTR);
 /* No reply to EXEC call */
-RTS_LOCK_UNSET(rp, RTS_RECEIVING);
+RTS_UNSET(rp, RTS_RECEIVING);
 /* Mark fpu_regs contents as not significant, so fpu
 * will be initialized when it's used next time. */

View file

@@ -70,7 +70,7 @@ register struct proc *rc; /* slot of process to clean up */
 /* Make sure that the exiting process is no longer scheduled,
 * and mark slot as FREE. Also mark saved fpu contents as not significant.
 */
-RTS_LOCK_SETFLAGS(rc, RTS_SLOT_FREE);
+RTS_SETFLAGS(rc, RTS_SLOT_FREE);
 rc->p_misc_flags &= ~MF_FPU_INITIALIZED;
 /* Release the process table slot. If this is a system process, also

View file

@@ -105,13 +105,13 @@ PUBLIC int do_fork(struct proc * caller, message * m_ptr)
 /* Don't schedule process in VM mode until it has a new pagetable. */
 if(m_ptr->PR_FORK_FLAGS & PFF_VMINHIBIT) {
-RTS_LOCK_SET(rpc, RTS_VMINHIBIT);
+RTS_SET(rpc, RTS_VMINHIBIT);
 }
 /*
 * Only one in group should have RTS_SIGNALED, child doesn't inherit tracing.
 */
-RTS_LOCK_UNSET(rpc, (RTS_SIGNALED | RTS_SIG_PENDING | RTS_P_STOP));
+RTS_UNSET(rpc, (RTS_SIGNALED | RTS_SIG_PENDING | RTS_P_STOP));
 sigemptyset(&rpc->p_pending);
 return r;

View file

@@ -32,7 +32,7 @@ PUBLIC int do_getksig(struct proc * caller, message * m_ptr)
 m_ptr->SIG_ENDPT = rp->p_endpoint;
 m_ptr->SIG_MAP = rp->p_pending; /* pending signals map */
 sigemptyset(&rp->p_pending); /* ball is in PM's court */
-RTS_LOCK_UNSET(rp, RTS_SIGNALED); /* blocked by SIG_PENDING */
+RTS_UNSET(rp, RTS_SIGNALED); /* blocked by SIG_PENDING */
 return(OK);
 }
 }

View file

@@ -37,12 +37,10 @@ PUBLIC int do_nice(struct proc * caller, message * m_ptr)
 if (new_q < MAX_USER_Q) new_q = MAX_USER_Q; /* shouldn't happen */
 if (new_q > MIN_USER_Q) new_q = MIN_USER_Q; /* shouldn't happen */
-/* Make sure the process is not running while changing its priority.
- * Put the process back in its new queue if it is runnable.
- */
-RTS_LOCK_SET(rp, RTS_SYS_LOCK);
+/* Dequeue the process and put it in its new queue if it is runnable. */
+RTS_SET(rp, RTS_SYS_LOCK);
 rp->p_max_priority = rp->p_priority = new_q;
-RTS_LOCK_UNSET(rp, RTS_SYS_LOCK);
+RTS_UNSET(rp, RTS_SYS_LOCK);
 return(OK);
 }

View file

@@ -52,13 +52,13 @@ PUBLIC int do_privctl(struct proc * caller, message * m_ptr)
 if (!RTS_ISSET(rp, RTS_NO_PRIV) || priv(rp)->s_proc_nr == NONE) {
 return(EPERM);
 }
-RTS_LOCK_UNSET(rp, RTS_NO_PRIV);
+RTS_UNSET(rp, RTS_NO_PRIV);
 return(OK);
 case SYS_PRIV_DISALLOW:
 /* Disallow process from running. */
 if (RTS_ISSET(rp, RTS_NO_PRIV)) return(EPERM);
-RTS_LOCK_SET(rp, RTS_NO_PRIV);
+RTS_SET(rp, RTS_NO_PRIV);
 return(OK);
 case SYS_PRIV_SET_SYS:

View file

@@ -40,14 +40,14 @@ PUBLIC int do_runctl(struct proc * caller, message * m_ptr)
 * should not also install signal handlers *and* expect POSIX compliance.
 */
 if (action == RC_STOP && (flags & RC_DELAY)) {
-RTS_LOCK_SET(rp, RTS_SYS_LOCK);
+RTS_SET(rp, RTS_SYS_LOCK);
 if (RTS_ISSET(rp, RTS_SENDING) || (rp->p_misc_flags & MF_SC_DEFER))
 rp->p_misc_flags |= MF_SIG_DELAY;
 delayed = (rp->p_misc_flags & MF_SIG_DELAY);
-RTS_LOCK_UNSET(rp, RTS_SYS_LOCK);
+RTS_UNSET(rp, RTS_SYS_LOCK);
 if (delayed) return(EBUSY);
 }
@@ -55,10 +55,10 @@ PUBLIC int do_runctl(struct proc * caller, message * m_ptr)
 /* Either set or clear the stop flag. */
 switch (action) {
 case RC_STOP:
-RTS_LOCK_SET(rp, RTS_PROC_STOP);
+RTS_SET(rp, RTS_PROC_STOP);
 break;
 case RC_RESUME:
-RTS_LOCK_UNSET(rp, RTS_PROC_STOP);
+RTS_UNSET(rp, RTS_PROC_STOP);
 break;
 default:
 return(EINVAL);

View file

@@ -145,8 +145,8 @@ PUBLIC int map_invoke_vm(struct proc * caller,
 vmassert(!RTS_ISSET(caller, RTS_VMREQTARGET));
 vmassert(!RTS_ISSET(dst, RTS_VMREQUEST));
 vmassert(!RTS_ISSET(dst, RTS_VMREQTARGET));
-RTS_LOCK_SET(caller, RTS_VMREQUEST);
-RTS_LOCK_SET(dst, RTS_VMREQTARGET);
+RTS_SET(caller, RTS_VMREQUEST);
+RTS_SET(dst, RTS_VMREQTARGET);
 /* Map to the destination. */
 caller->p_vmrequest.req_type = req_type;
@@ -161,7 +161,7 @@ PUBLIC int map_invoke_vm(struct proc * caller,
 /* Connect caller on vmrequest wait queue. */
 if(!(caller->p_vmrequest.nextrequestor = vmrequest))
-lock_notify(SYSTEM, VM_PROC_NR);
+mini_notify(proc_addr(SYSTEM), VM_PROC_NR);
 vmrequest = caller;
 return OK;

View file

@@ -65,7 +65,7 @@ timer_t *tp;
 * process with a notification message from CLOCK.
 */
 int proc_nr_e = tmr_arg(tp)->ta_int; /* get process number */
-lock_notify(CLOCK, proc_nr_e); /* notify process */
+mini_notify(proc_addr(CLOCK), proc_nr_e); /* notify process */
 }
 #endif /* USE_SETALARM */

View file

@@ -90,7 +90,7 @@ PUBLIC int do_trace(struct proc * caller, message * m_ptr)
 if (isemptyp(rp)) return(EINVAL);
 switch (tr_request) {
 case T_STOP: /* stop process */
-RTS_LOCK_SET(rp, RTS_P_STOP);
+RTS_SET(rp, RTS_P_STOP);
 rp->p_reg.psw &= ~TRACEBIT; /* clear trace bit */
 rp->p_misc_flags &= ~MF_SC_TRACE; /* clear syscall trace flag */
 return(OK);
@@ -167,19 +167,19 @@ PUBLIC int do_trace(struct proc * caller, message * m_ptr)
 /* fall through */
 case T_RESUME: /* resume execution */
-RTS_LOCK_UNSET(rp, RTS_P_STOP);
+RTS_UNSET(rp, RTS_P_STOP);
 m_ptr->CTL_DATA = 0;
 break;
 case T_STEP: /* set trace bit */
 rp->p_reg.psw |= TRACEBIT;
-RTS_LOCK_UNSET(rp, RTS_P_STOP);
+RTS_UNSET(rp, RTS_P_STOP);
 m_ptr->CTL_DATA = 0;
 break;
 case T_SYSCALL: /* trace system call */
 rp->p_misc_flags |= MF_SC_TRACE;
-RTS_LOCK_UNSET(rp, RTS_P_STOP);
+RTS_UNSET(rp, RTS_P_STOP);
 m_ptr->CTL_DATA = 0;
 break;

View file

@@ -100,11 +100,7 @@ PUBLIC int do_vdevio(struct proc * caller, message * m_ptr)
 }
 }
-/* Perform actual device I/O for byte, word, and long values. Note that
- * the entire switch is wrapped in lock() and unlock() to prevent the I/O
- * batch from being interrupted.
- */
-lock;
+/* Perform actual device I/O for byte, word, and long values */
 switch (io_type) {
 case _DIO_BYTE: /* byte values */
 if (io_in) for (i=0; i<vec_size; i++)
@@ -152,7 +148,6 @@ PUBLIC int do_vdevio(struct proc * caller, message * m_ptr)
 }
 }
 }
-unlock;
 /* Almost done, copy back results for input requests. */
 if (io_in)

View file

@@ -33,7 +33,7 @@ PUBLIC int do_vmctl(struct proc * caller, message * m_ptr)
 switch(m_ptr->SVMCTL_PARAM) {
 case VMCTL_CLEAR_PAGEFAULT:
-RTS_LOCK_UNSET(p, RTS_PAGEFAULT);
+RTS_UNSET(p, RTS_PAGEFAULT);
 return OK;
 case VMCTL_MEMREQ_GET:
 /* Send VM the information about the memory request. */
@@ -125,7 +125,7 @@ PUBLIC int do_vmctl(struct proc * caller, message * m_ptr)
 #endif
 vmassert(RTS_ISSET(target, RTS_VMREQTARGET));
-RTS_LOCK_UNSET(target, RTS_VMREQTARGET);
+RTS_UNSET(target, RTS_VMREQTARGET);
 switch(p->p_vmrequest.type) {
 case VMSTYPE_KERNELCALL:
@@ -152,15 +152,10 @@ PUBLIC int do_vmctl(struct proc * caller, message * m_ptr)
 p->p_vmrequest.type);
 }
-RTS_LOCK_UNSET(p, RTS_VMREQUEST);
+RTS_UNSET(p, RTS_VMREQUEST);
 return OK;
 case VMCTL_ENABLE_PAGING:
-/*
- * The system task must not get preempted while switching to paging;
- * interrupt handling is not safe.
- */
-lock;
 if(vm_running)
 minix_panic("do_vmctl: paging already enabled", NO_NUM);
 vm_init(p);
@@ -169,14 +164,12 @@ PUBLIC int do_vmctl(struct proc * caller, message * m_ptr)
 vmassert(p->p_delivermsg_lin ==
 umap_local(p, D, p->p_delivermsg_vir, sizeof(message)));
 if ((err = arch_enable_paging()) != OK) {
-unlock;
 return err;
 }
 if(newmap(caller, p, (struct mem_map *) m_ptr->SVMCTL_VALUE) != OK)
 minix_panic("do_vmctl: newmap failed", NO_NUM);
 FIXLINMSG(p);
 vmassert(p->p_delivermsg_lin);
-unlock;
 return OK;
 case VMCTL_KERN_PHYSMAP:
 {