2010-09-15 16:09:52 +02:00
|
|
|
#include "smp.h"
#include "interrupt.h"

/* Total number of CPUs the kernel drives (set during SMP discovery —
 * NOTE(review): assignment not visible in this file; confirm at boot code). */
unsigned ncpus;
/* Presumably the number of hyper-threads per physical core — TODO confirm
 * against the topology-detection code that fills it in. */
unsigned ht_per_core;
/* CPU id of the bootstrap processor (BSP). */
unsigned bsp_cpu_id;

/* Per-CPU state, one slot per possible processor. */
PUBLIC struct cpu cpus[CONFIG_MAX_CPUS];
|
|
|
|
|
|
|
|
/* flags passed to another cpu along with a sched ipi */
struct sched_ipi_data {
	volatile u32_t flags;	/* SCHED_IPI_* request bits; non-zero while
				 * a request is pending on the target cpu */
	volatile u32_t data;	/* request payload; for SCHED_IPI_STOP_PROC
				 * this holds a struct proc pointer cast to
				 * u32_t (see smp_schedule_stop_proc) */
};

/* Per-CPU mailbox: the sender fills slot [cpu], the IPI handler on that
 * cpu consumes it and clears flags to signal completion. */
PRIVATE struct sched_ipi_data sched_ipi_data[CONFIG_MAX_CPUS];

/* Request the target cpu to stop the process passed in .data. */
#define SCHED_IPI_STOP_PROC 1
|
2010-09-15 16:10:03 +02:00
|
|
|
|
2010-09-15 16:10:12 +02:00
|
|
|
/* Number of application processors (APs) that have finished booting;
 * incremented by each AP in ap_boot_finished(), polled by the BSP. */
static volatile unsigned ap_cpus_booted;

/* The big kernel lock: serializes kernel-mode execution across CPUs. */
SPINLOCK_DEFINE(big_kernel_lock)
/* Lock used during the AP boot sequence — NOTE(review): its acquire/release
 * sites are not visible in this chunk; see the arch boot code. */
SPINLOCK_DEFINE(boot_lock)
|
|
|
|
|
2010-09-15 16:10:54 +02:00
|
|
|
PUBLIC void wait_for_APs_to_finish_booting(void)
|
2010-09-15 16:10:12 +02:00
|
|
|
{
|
|
|
|
/* we must let the other CPUs to run in kernel mode first */
|
|
|
|
BKL_UNLOCK();
|
|
|
|
while (ap_cpus_booted != (ncpus - 1))
|
|
|
|
arch_pause();
|
|
|
|
/* now we have to take the lock again as we continu execution */
|
|
|
|
BKL_LOCK();
|
|
|
|
}
|
|
|
|
|
2010-09-15 16:10:54 +02:00
|
|
|
PUBLIC void ap_boot_finished(unsigned cpu)
|
2010-09-15 16:10:12 +02:00
|
|
|
{
|
|
|
|
ap_cpus_booted++;
|
|
|
|
}
|
2010-09-15 16:10:54 +02:00
|
|
|
|
|
|
|
/*
 * Handler for the halt IPI: bring this cpu to a stop.  The call order
 * matters — acknowledge the interrupt first, quiesce the local timer so
 * no further ticks arrive, then let the architecture code halt the cpu
 * (arch_smp_halt_cpu presumably does not return — TODO confirm).
 */
PUBLIC void smp_ipi_halt_handler(void)
{
	ipi_ack();
	stop_local_timer();
	arch_smp_halt_cpu();
}
|
|
|
|
|
2010-09-15 16:10:57 +02:00
|
|
|
PUBLIC void smp_schedule(unsigned cpu)
|
|
|
|
{
|
2010-09-15 16:11:09 +02:00
|
|
|
/*
|
|
|
|
* check if the cpu is processing some other ipi already. If yes, no
|
|
|
|
* need to wake it up
|
|
|
|
*/
|
|
|
|
if ((volatile unsigned)sched_ipi_data[cpu].flags != 0)
|
|
|
|
return;
|
|
|
|
arch_send_smp_schedule_ipi(cpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Ask the cpu that process p last ran on to stop p, and wait until it has
 * done so.  Caller must hold the big kernel lock.  The request is posted
 * in the target cpu's sched_ipi_data mailbox; the remote sched IPI handler
 * performs the RTS_SET and clears .flags to signal completion.
 */
PUBLIC void smp_schedule_stop_proc(struct proc * p)
{
	unsigned cpu = p->p_cpu;

	/* Post the request before sending the IPI: flag first, then the
	 * payload (the proc pointer, squeezed into a u32_t). */
	sched_ipi_data[cpu].flags |= SCHED_IPI_STOP_PROC;
	sched_ipi_data[cpu].data = (u32_t) p;
	arch_send_smp_schedule_ipi(cpu);
	/* Drop the BKL so the remote cpu can enter the kernel and service
	 * the IPI, then busy-wait for it to acknowledge by clearing flags. */
	BKL_UNLOCK();
	while ((volatile unsigned)sched_ipi_data[cpu].flags != 0);
	BKL_LOCK();
}
|
|
|
|
|
|
|
|
PUBLIC void smp_ipi_sched_handler(void)
|
|
|
|
{
|
|
|
|
struct proc * p;
|
2010-09-15 16:11:09 +02:00
|
|
|
unsigned mycpu = cpuid;
|
|
|
|
unsigned flgs;
|
2010-09-15 16:10:57 +02:00
|
|
|
|
|
|
|
ipi_ack();
|
|
|
|
|
2010-09-15 16:11:09 +02:00
|
|
|
p = get_cpu_var(mycpu, proc_ptr);
|
|
|
|
flgs = sched_ipi_data[mycpu].flags;
|
2010-09-15 16:10:57 +02:00
|
|
|
|
2010-09-15 16:11:09 +02:00
|
|
|
if (flgs & SCHED_IPI_STOP_PROC) {
|
|
|
|
RTS_SET((struct proc *)sched_ipi_data[mycpu].data, RTS_PROC_STOP);
|
|
|
|
}
|
|
|
|
else if (p->p_endpoint != IDLE) {
|
|
|
|
RTS_SET(p, RTS_PREEMPTED);
|
|
|
|
}
|
|
|
|
sched_ipi_data[cpuid].flags = 0;
|
2010-09-15 16:10:57 +02:00
|
|
|
}
|
|
|
|
|