kernel: new DEBUG_RACE option. try to provoke race conditions between processes.

It does this by:
  - making all processes interruptible by running out of quantum
  - giving all processes a single tick of quantum
  - picking a random runnable process instead of in order, and
    from a single pool of runnable processes (no priorities)

This, together with very high HZ values, currently provokes some race
conditions previously seen only when running with SMP.
This commit is contained in:
Ben Gras 2010-05-08 18:00:03 +00:00
parent d5a0af826a
commit a1636b85b7
3 changed files with 54 additions and 0 deletions

View file

@ -35,6 +35,7 @@
#include <assert.h>
#include "clock.h"
#include "debug.h"
#ifdef CONFIG_WATCHDOG
#include "watchdog.h"
@ -199,9 +200,15 @@ PUBLIC int ap_timer_int_handler(void)
billp = bill_ptr;
p->p_user_time += ticks;
#if DEBUG_RACE
/* With DEBUG_RACE, every process gets interrupted. */
p->p_ticks_left = 0;
#else
if (priv(p)->s_flags & PREEMPTIBLE) {
p->p_ticks_left -= ticks;
}
#endif
if (! (priv(p)->s_flags & BILLABLE)) {
billp->p_sys_time += ticks;
}

View file

@ -30,6 +30,13 @@
/* Verbose messages. */
#define DEBUG_TRACE 0
/* DEBUG_RACE makes every process preemptible, schedules
* every process on the same priority queue, and randomizes
* the next process to run, in order to help catch race
* conditions that could otherwise be masked.
*/
#define DEBUG_RACE 0
#if DEBUG_TRACE
#define VF_SCHEDULING (1L << 1)

View file

@ -1144,6 +1144,11 @@ PUBLIC void enqueue(
*/
int q = rp->p_priority; /* scheduling queue to use */
#if DEBUG_RACE
/* With DEBUG_RACE, schedule everyone at the same priority level. */
rp->p_priority = q = MIN_USER_Q;
#endif
assert(proc_is_runnable(rp));
assert(q >= 0);
@ -1255,6 +1260,32 @@ PUBLIC void dequeue(const struct proc *rp)
#endif
}
#if DEBUG_RACE
/*===========================================================================*
 *				random_process				     *
 *===========================================================================*/
PRIVATE struct proc *random_process(struct proc *head)
{
/* Return a pseudorandomly chosen process from the ready list starting at
 * 'head'. Used under DEBUG_RACE to randomize which runnable process runs
 * next, to help provoke latent race conditions. The caller must pass a
 * non-empty list (pick_proc() skips empty queues).
 */
	int i, n = 0;
	struct proc *rp;
	u64_t r;
	read_tsc_64(&r);

	/* Count the processes on the ready list. */
	for(rp = head; rp; rp = rp->p_nextready)
		n++;

	/* An empty list would make the modulo below undefined behaviour
	 * (division by zero); catch a NULL/empty 'head' explicitly.
	 */
	assert(n > 0);

	/* Use low-order word of TSC as pseudorandom value. */
	i = r.lo % n;

	/* Advance i entries from the head; i is in [0, n-1], so rp never
	 * walks off the end of the list.
	 */
	for(rp = head; i--; rp = rp->p_nextready)
		;
	assert(rp);
	return rp;
}
#endif
/*===========================================================================*
* pick_proc *
*===========================================================================*/
@ -1276,6 +1307,11 @@ PRIVATE struct proc * pick_proc(void)
TRACE(VF_PICKPROC, printf("queue %d empty\n", q););
continue;
}
#if DEBUG_RACE
rp = random_process(rdy_head[q]);
#endif
TRACE(VF_PICKPROC, printf("found %s / %d on queue %d\n",
rp->p_name, rp->p_endpoint, q););
assert(proc_is_runnable(rp));
@ -1393,6 +1429,10 @@ PUBLIC void check_ticks_left(struct proc * p)
* be renewed. In fact, they bypass scheduling
*/
p->p_ticks_left = p->p_quantum_size;
#if DEBUG_RACE
RTS_SET(proc_ptr, RTS_PREEMPTED);
RTS_UNSET(proc_ptr, RTS_PREEMPTED);
#endif
}
}
}