#ifndef __SMP_H__
#define __SMP_H__

#ifdef CONFIG_SMP

#ifndef __ASSEMBLY__

#include "kernel/kernel.h"
#include "arch_smp.h"
#include "spinlock.h"

/* number of CPUs (execution strands) in the system */
EXTERN unsigned ncpus;
/* Number of virtual strands per physical core */
EXTERN unsigned ht_per_core;
/* which cpu is bootstrapping */
EXTERN unsigned bsp_cpu_id;

#define cpu_is_bsp(cpu) (bsp_cpu_id == cpu)
/*
 * SMP initialization is largely architecture dependent and each architecture
 * must provide a method for doing it. If initializing SMP fails, the function
 * does not report it; however, it must put the system in such a state that it
 * falls back to a uniprocessor system. Although the uniprocessor configuration
 * may be suboptimal, the system must be able to run on the bootstrap processor
 * as if it were the only processor in the system.
 */
void smp_init(void);
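/*
 * A hedged sketch of the fallback behaviour described above, not the
 * actual implementation; arch_detect_cpus() and arch_start_aps() are
 * hypothetical helpers used only for illustration.  On failure the sketch
 * simply continues with ncpus set to 1, i.e. the kernel keeps running on
 * the bootstrap processor alone.
 *
 *	void smp_init(void)
 *	{
 *		ncpus = arch_detect_cpus();
 *		if (ncpus < 2 || arch_start_aps() != 0)
 *			ncpus = 1;
 *	}
 */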
#define CPU_IS_BSP 1
#define CPU_IS_READY 2

struct cpu {
	u32_t flags;
};

EXTERN struct cpu cpus[CONFIG_MAX_CPUS];

#define cpu_set_flag(cpu, flag) do { cpus[cpu].flags |= (flag); } while(0)
#define cpu_clear_flag(cpu, flag) do { cpus[cpu].flags &= ~(flag); } while(0)
#define cpu_test_flag(cpu, flag) (cpus[cpu].flags & (flag))
#define cpu_is_ready(cpu) cpu_test_flag(cpu, CPU_IS_READY)
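/*
 * Usage sketch (illustrative only, not taken from the kernel sources):
 * during startup the bootstrap cpu is tagged, each application processor
 * marks itself ready once it has booted, and other code can test the flags.
 *
 *	cpu_set_flag(bsp_cpu_id, CPU_IS_BSP);
 *	cpu_set_flag(cpu, CPU_IS_READY);
 *	if (cpu_is_ready(cpu) && !cpu_is_bsp(cpu))
 *		... send work to that cpu ...
 */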
/*
 * The Big Kernel Lock prevents more than one cpu from executing kernel code
 * at a time.
 */
SPINLOCK_DECLARE(big_kernel_lock)
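/*
 * A minimal usage sketch (an assumption, not the literal kernel entry/exit
 * code, and assuming spinlock.h provides spinlock_lock()/spinlock_unlock()):
 * the lock is taken when a cpu enters the kernel and dropped when it leaves
 * again, so at most one cpu runs kernel code at any time.
 *
 *	spinlock_lock(&big_kernel_lock);
 *	... handle the trap, interrupt or IPC call ...
 *	spinlock_unlock(&big_kernel_lock);
 */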
/*
 * Used to synchronize the booting APs.
 */
SPINLOCK_DECLARE(boot_lock)
void wait_for_APs_to_finish_booting(void);
void ap_boot_finished(unsigned cpu);
void smp_shutdown_aps(void);
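/*
 * Hedged sketch of the boot hand-shake implied by the declarations above;
 * this is not the kernel's actual code, just one way the pieces could fit
 * together.
 *
 *	void ap_boot_finished(unsigned cpu)
 *	{
 *		cpu_set_flag(cpu, CPU_IS_READY);
 *	}
 *
 *	void wait_for_APs_to_finish_booting(void)
 *	{
 *		unsigned c;
 *
 *		for (c = 0; c < ncpus; c++)
 *			while (!cpu_is_ready(c))
 *				;
 *	}
 */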
/* IPI handlers */
void smp_ipi_halt_handler(void);
void smp_ipi_sched_handler(void);
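/*
 * Assumed relationship between the halt calls (a sketch, not the kernel's
 * code): smp_shutdown_aps() asks every other cpu to halt; each of them
 * answers the IPI in smp_ipi_halt_handler(), which ends by calling
 * arch_smp_halt_cpu() and never returns.
 *
 *	void smp_ipi_halt_handler(void)
 *	{
 *		... acknowledge the interrupt ...
 *		arch_smp_halt_cpu();
 *	}
 */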
void smp_schedule(unsigned cpu);
/* stop a process running on a different cpu */
void smp_schedule_stop_proc(struct proc * p);
/* stop a process on a different cpu because its address space is being changed */
void smp_schedule_vminhibit(struct proc * p);
/* stop the process and save its full context */
void smp_schedule_stop_proc_save_ctx(struct proc * p);
/* migrate the full context of a process to the destination CPU */
void smp_schedule_migrate_proc(struct proc * p, unsigned dest_cpu);
void arch_send_smp_schedule_ipi(unsigned cpu);
void arch_smp_halt_cpu(void);
/* deal with x-cpu scheduling event */
void smp_sched_handler(void);
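/*
 * Hedged sketch of the cross-cpu scheduling path suggested by the
 * declarations above (not the actual implementation): the requesting cpu
 * records what it wants done and kicks the target with a scheduling IPI;
 * the target answers in smp_ipi_sched_handler(), which calls
 * smp_sched_handler() to act on the request.
 *
 *	void smp_schedule(unsigned cpu)
 *	{
 *		arch_send_smp_schedule_ipi(cpu);
 *	}
 *
 *	void smp_ipi_sched_handler(void)
 *	{
 *		... acknowledge the interrupt ...
 *		smp_sched_handler();
 *	}
 */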
#endif /* __ASSEMBLY__ */

#endif /* CONFIG_SMP */

#endif /* __SMP_H__ */