#include <assert.h>

#include "smp.h"
#include "interrupt.h"
#include "clock.h"
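
/* basic cpu topology, filled in by the architecture code at boot time */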
unsigned ncpus;
unsigned ht_per_core;
unsigned bsp_cpu_id;
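
/* per-cpu state, indexed by cpu id; flags are tested via cpu_test_flag() */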
struct cpu cpus[CONFIG_MAX_CPUS];

/* info passed to another cpu along with a sched ipi */
struct sched_ipi_data {
	volatile u32_t flags;
	volatile u32_t data;
};

static struct sched_ipi_data sched_ipi_data[CONFIG_MAX_CPUS];
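
/* flag bits for sched_ipi_data.flags; one request may combine several */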
#define SCHED_IPI_STOP_PROC	1
#define SCHED_IPI_VM_INHIBIT	2
#define SCHED_IPI_SAVE_CTX	4
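
/* number of application processors (APs) that have finished booting */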
static volatile unsigned ap_cpus_booted;
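
/*
 * The big kernel lock (BKL) serializes all execution inside the kernel;
 * the boot lock synchronizes the application processors while they boot.
 */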
SPINLOCK_DEFINE(big_kernel_lock)
SPINLOCK_DEFINE(boot_lock)
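
/*
 * Runs on the bootstrap processor: count the cpus that made it, warn if any
 * failed, then drop the BKL so the waiting APs can finish booting in kernel
 * mode, and retake it once they are all done.
 */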
void wait_for_APs_to_finish_booting(void)
{
	unsigned n = 0;
	int i;

	/* check how many cpus are actually alive */
	for (i = 0 ; i < ncpus ; i++) {
		if (cpu_test_flag(i, CPU_IS_READY))
			n++;
	}
	if (n != ncpus)
		printf("WARNING: only %u out of %u cpus booted\n", n, ncpus);

	/* we must let the other CPUs run in kernel mode first */
	BKL_UNLOCK();
	while (ap_cpus_booted != (n - 1))
		arch_pause();
	/* now we have to take the lock again as we continue execution */
	BKL_LOCK();
}
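
/* called by each AP as the last step of its boot sequence */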
void ap_boot_finished(unsigned cpu)
{
	ap_cpus_booted++;
}
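
/* halt ipi handler: acknowledge, stop the local timer and halt this cpu */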
void smp_ipi_halt_handler(void)
{
	ipi_ack();
	stop_local_timer();
	arch_smp_halt_cpu();
}
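
/* ask cpu to run its scheduler; fire-and-forget, no completion handshake */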
void smp_schedule(unsigned cpu)
{
	arch_send_smp_schedule_ipi(cpu);
}

void smp_sched_handler(void);

/*
 * Tell another cpu about a task to do and return only after that cpu
 * acknowledges that the task is finished. If the target cpu is already
 * handling a task posted by yet another cpu, wait until it is done
 * before submitting ours.
 */
static void smp_schedule_sync(struct proc * p, unsigned task)
{
	unsigned cpu = p->p_cpu;
	unsigned mycpu = cpuid;

	assert(cpu != mycpu);
	/*
	 * if some other cpu made a request to the same cpu, wait until it is
	 * done before proceeding
	 */
	if (sched_ipi_data[cpu].flags != 0) {
		BKL_UNLOCK();
		while (sched_ipi_data[cpu].flags != 0) {
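			/*
			 * while spinning, service any request another cpu
			 * may have posted to us, or both cpus could end up
			 * waiting for each other
			 */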
			if (sched_ipi_data[mycpu].flags) {
				BKL_LOCK();
				smp_sched_handler();
				BKL_UNLOCK();
			}
		}
		BKL_LOCK();
	}
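
	/* post the request; the barrier keeps it ordered before the ipi */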
	sched_ipi_data[cpu].data = (u32_t) p;
	sched_ipi_data[cpu].flags |= task;
	__insn_barrier();
	arch_send_smp_schedule_ipi(cpu);

	/* wait until the destination cpu finishes its job */
	BKL_UNLOCK();
	while (sched_ipi_data[cpu].flags != 0) {
		if (sched_ipi_data[mycpu].flags) {
			BKL_LOCK();
			smp_sched_handler();
			BKL_UNLOCK();
		}
	}
	BKL_LOCK();
}
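
/*
 * Stop a process on a (possibly) remote cpu. A process that is not runnable
 * can be stopped locally; a runnable one must be stopped, synchronously, by
 * the cpu it runs on.
 */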
void smp_schedule_stop_proc(struct proc * p)
{
	if (proc_is_runnable(p))
		smp_schedule_sync(p, SCHED_IPI_STOP_PROC);
	else
		RTS_SET(p, RTS_PROC_STOP);
	assert(RTS_ISSET(p, RTS_PROC_STOP));
}
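
/* same pattern, but mark the process with RTS_VMINHIBIT to keep it off cpu */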
void smp_schedule_vminhibit(struct proc * p)
{
	if (proc_is_runnable(p))
		smp_schedule_sync(p, SCHED_IPI_VM_INHIBIT);
	else
		RTS_SET(p, RTS_VMINHIBIT);
	assert(RTS_ISSET(p, RTS_VMINHIBIT));
}

void smp_schedule_stop_proc_save_ctx(struct proc * p)
{
	/*
	 * stop the process and force its complete context to be saved
	 * (i.e. including FPU state and such)
	 */
	smp_schedule_sync(p, SCHED_IPI_STOP_PROC | SCHED_IPI_SAVE_CTX);
	assert(RTS_ISSET(p, RTS_PROC_STOP));
}

void smp_schedule_migrate_proc(struct proc * p, unsigned dest_cpu)
{
	/*
	 * stop the process and force its complete context to be saved
	 * (i.e. including FPU state and such)
	 */
	smp_schedule_sync(p, SCHED_IPI_STOP_PROC | SCHED_IPI_SAVE_CTX);
	assert(RTS_ISSET(p, RTS_PROC_STOP));

	/* assign the new cpu and let the process run again */
	p->p_cpu = dest_cpu;
	RTS_UNSET(p, RTS_PROC_STOP);
}
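
/*
 * Handle the request(s) posted in this cpu's sched_ipi_data slot. Runs both
 * when a sched ipi arrives (before smp_ipi_sched_handler(), see below) and
 * from smp_schedule_sync() while it spins waiting for its own request.
 */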
void smp_sched_handler(void)
{
	unsigned flgs;
	unsigned cpu = cpuid;

	flgs = sched_ipi_data[cpu].flags;

	if (flgs) {
		struct proc * p;
		p = (struct proc *)sched_ipi_data[cpu].data;

		if (flgs & SCHED_IPI_STOP_PROC) {
			RTS_SET(p, RTS_PROC_STOP);
		}
		if (flgs & SCHED_IPI_SAVE_CTX) {
			/* all context has been saved already, FPU remains */
			if (proc_used_fpu(p) &&
					get_cpulocal_var(fpu_owner) == p) {
				disable_fpu_exception();
				save_local_fpu(p, FALSE /*retain*/);
				/* we're preparing to migrate somewhere else */
				release_fpu(p);
			}
		}
		if (flgs & SCHED_IPI_VM_INHIBIT) {
			RTS_SET(p, RTS_VMINHIBIT);
		}
	}
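
	/* clearing the flags acknowledges the request back to its sender */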
	__insn_barrier();
	sched_ipi_data[cpu].flags = 0;
}

/*
 * This function is always called after smp_sched_handler() has already run.
 * It only serves the purpose of acknowledging the IPI and preempting the
 * current process if the cpu was not idle.
 */
void smp_ipi_sched_handler(void)
{
	struct proc * curr;

	ipi_ack();

	curr = get_cpulocal_var(proc_ptr);
	if (curr->p_endpoint != IDLE) {
		RTS_SET(curr, RTS_PREEMPTED);
	}
}