SMP - fixed IPI livelock
- two CPUs can now issue IPIs to each other without any hazard - while we are waiting for attention from one CPU we must be able to handle synchronous scheduling IPIs arriving from other CPUs, otherwise the two CPUs might livelock
- added the barriers necessary to prevent reordering
parent 0468fca72b
commit 0a55e63413
4 changed files with 74 additions and 20 deletions
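The livelock the message describes can be illustrated outside the kernel. The sketch below is not MINIX code - the request[] array, handle_request() and request_sync() are invented names - it only shows the wait-loop pattern the commit introduces: spin on the peer's flag, but service any request addressed to this CPU while spinning.

#include <stdatomic.h>

#define NCPUS 2

static atomic_uint request[NCPUS];	/* nonzero: a request is pending for that CPU */

static void handle_request(unsigned me)
{
	/* ... do the work the other CPU asked for ... */
	atomic_store(&request[me], 0);	/* ack, so the sender stops spinning */
}

static void request_sync(unsigned me, unsigned target, unsigned task)
{
	atomic_store(&request[target], task);

	/*
	 * Wait for the target's ack, but keep serving requests sent to us;
	 * if both CPUs only spun here, each would wait forever for the other.
	 */
	while (atomic_load(&request[target]) != 0) {
		if (atomic_load(&request[me]) != 0)
			handle_request(me);
	}
}

The kernel changes below do the same thing, except that they also juggle the big kernel lock around the handler call and run the handler from context_stop() in case the IPI arrived while this CPU was already spinning on the BKL.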
@@ -17,8 +17,13 @@
 #ifdef USE_APIC
 #include "apic.h"
 #endif
 
 #include "spinlock.h"
+
+#ifdef CONFIG_SMP
+#include "kernel/smp.h"
+#endif
 
 #define CLOCK_ACK_BIT	0x80	/* PS/2 clock interrupt acknowledge bit */
 
 /* Clock parameters. */
@@ -235,6 +240,20 @@ PUBLIC void context_stop(struct proc * p)
 	bkl_succ[cpu] += !(!(succ == 0));
 
 	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
+
+#ifdef CONFIG_SMP
+	/*
+	 * Since at the time we got a scheduling IPI we might have been
+	 * waiting for BKL already, we may miss it due to a similar IPI to
+	 * the cpu which is already waiting for us to handle its. This
+	 * results in a live-lock of these two cpus.
+	 *
+	 * Therefore we always check if there is one pending and if so,
+	 * we handle it straight away so the other cpu can continue and
+	 * we do not deadlock.
+	 */
+	smp_sched_handler();
+#endif
 }
 #else
 	read_tsc_64(&tsc);
kernel/smp.c (67 changes)
@@ -66,11 +66,13 @@ PUBLIC void smp_schedule(unsigned cpu)
 	 * check if the cpu is processing some other ipi already. If yes, no
 	 * need to wake it up
 	 */
-	if ((volatile unsigned)sched_ipi_data[cpu].flags != 0)
+	if (sched_ipi_data[cpu].flags != 0)
 		return;
 	arch_send_smp_schedule_ipi(cpu);
 }
 
+_PROTOTYPE(void smp_sched_handler, (void));
+
 /*
  * tell another cpu about a task to do and return only after the cpu acks that
  * the task is finished. Also wait before it finishes task sent by another cpu
@@ -79,24 +81,39 @@ PUBLIC void smp_schedule(unsigned cpu)
 PRIVATE void smp_schedule_sync(struct proc * p, unsigned task)
 {
 	unsigned cpu = p->p_cpu;
+	unsigned mycpu = cpuid;
 
-	/*
+	assert(cpu != mycpu);
+	/*
 	 * if some other cpu made a request to the same cpu, wait until it is
 	 * done before proceeding
 	 */
-	if ((volatile unsigned)sched_ipi_data[cpu].flags != 0) {
+	if (sched_ipi_data[cpu].flags != 0) {
 		BKL_UNLOCK();
-		while ((volatile unsigned)sched_ipi_data[cpu].flags != 0);
+		while (sched_ipi_data[cpu].flags != 0) {
+			if (sched_ipi_data[mycpu].flags) {
+				BKL_LOCK();
+				smp_sched_handler();
+				BKL_UNLOCK();
+			}
+		}
 		BKL_LOCK();
 	}
 
-	sched_ipi_data[cpu].flags |= task;
 	sched_ipi_data[cpu].data = (u32_t) p;
+	sched_ipi_data[cpu].flags |= task;
+	__insn_barrier();
 	arch_send_smp_schedule_ipi(cpu);
 
 	/* wait until the destination cpu finishes its job */
 	BKL_UNLOCK();
-	while ((volatile unsigned)sched_ipi_data[cpu].flags != 0);
+	while (sched_ipi_data[cpu].flags != 0) {
+		if (sched_ipi_data[mycpu].flags) {
+			BKL_LOCK();
+			smp_sched_handler();
+			BKL_UNLOCK();
+		}
+	}
 	BKL_LOCK();
 }
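The hunk above also reorders the stores to sched_ipi_data and adds __insn_barrier() so that the destination CPU can never observe the flag set before the payload it refers to. A minimal sketch of that publication order, using a C11 release store in place of the kernel's barrier macro (the struct and function names here are invented for the example):

#include <stdatomic.h>
#include <stdint.h>

struct ipi_slot {
	uint32_t data;		/* argument of the cross-CPU request */
	atomic_uint flags;	/* nonzero: a request is pending */
};

static void post_request(struct ipi_slot *s, uint32_t data, unsigned task)
{
	s->data = data;				/* 1. write the payload first */
	atomic_store_explicit(&s->flags, task,
	    memory_order_release);		/* 2. then publish the flag */
	/* 3. only now send the IPI to the destination CPU */
}

On x86 stores are not reordered with other stores, so a compiler-level barrier such as __insn_barrier() is enough there; the release store expresses the same requirement portably.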
@@ -142,20 +159,16 @@ PUBLIC void smp_schedule_migrate_proc(struct proc * p, unsigned dest_cpu)
 	RTS_UNSET(p, RTS_PROC_STOP);
 }
 
-PUBLIC void smp_ipi_sched_handler(void)
+PUBLIC void smp_sched_handler(void)
 {
-	struct proc * curr;
-	unsigned mycpu = cpuid;
 	unsigned flgs;
-
-	ipi_ack();
-
-	curr = get_cpu_var(mycpu, proc_ptr);
-	flgs = sched_ipi_data[mycpu].flags;
+	unsigned cpu = cpuid;
+
+	flgs = sched_ipi_data[cpu].flags;
 
 	if (flgs) {
 		struct proc * p;
-		p = (struct proc *)sched_ipi_data[mycpu].data;
+		p = (struct proc *)sched_ipi_data[cpu].data;
 
 		if (flgs & SCHED_IPI_STOP_PROC) {
 			RTS_SET(p, RTS_PROC_STOP);
@@ -174,9 +187,25 @@ PUBLIC void smp_ipi_sched_handler(void)
 			RTS_SET(p, RTS_VMINHIBIT);
 		}
 	}
-	else if (curr->p_endpoint != IDLE) {
-		RTS_SET(curr, RTS_PREEMPTED);
-	}
-	sched_ipi_data[cpuid].flags = 0;
+
+	__insn_barrier();
+	sched_ipi_data[cpu].flags = 0;
 }
+
+/*
+ * This function gets always called only after smp_sched_handler() has been
+ * already called. It only serves the purpose of acknowledging the IPI and
+ * preempting the current process if the CPU was not idle.
+ */
+PUBLIC void smp_ipi_sched_handler(void)
+{
+	struct proc * curr;
+
+	ipi_ack();
+
+	curr = get_cpulocal_var(proc_ptr);
+	if (curr->p_endpoint != IDLE) {
+		RTS_SET(curr, RTS_PREEMPTED);
+	}
+}
@@ -73,6 +73,9 @@ _PROTOTYPE(void smp_schedule_migrate_proc,
 _PROTOTYPE(void arch_send_smp_schedule_ipi, (unsigned cpu));
 _PROTOTYPE(void arch_smp_halt_cpu, (void));
 
+/* deal with x-cpu scheduling event */
+_PROTOTYPE(void smp_sched_handler, (void));
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* CONFIG_SMP */
@@ -33,9 +33,12 @@ PUBLIC void panic(const char *fmt, ...)
 		printf("\n");
 	}
 
-	printf("kernel: ");
+	printf("kernel on CPU %d: ", cpuid);
 	util_stacktrace();
 
+	printf("current process : ");
+	proc_stacktrace(get_cpulocal_var(proc_ptr));
+
 	/* Abort MINIX. */
 	minix_shutdown(NULL);
 }