kernel: spelling fixes

Change-Id: I73c759bdef98be35be77130895ae0ec497e1b954
commit 9f23acf410
parent 40fbf55cf3
Author: Thomas Cort
Date: 2013-10-09 11:28:23 -04:00

23 changed files with 36 additions and 36 deletions


@@ -151,7 +151,7 @@ void exception_handler(int is_nested, reg_t *saved_lr, int vector)
*/
if (is_nested) {
/*
- * if a problem occured while copying a message from userspace because
+ * if a problem occurred while copying a message from userspace because
* of a wrong pointer supplied by userland, handle it the only way we
* can handle it ...
*/


@@ -60,7 +60,7 @@ typedef int (*kern_phys_map_mapped)(vir_bytes id, vir_bytes new_addr );
/*
* struct used internally by memory.c to keep a list of
- * items to map. These should be staticaly allocated
+ * items to map. These should be statically allocated
* in the individual files and passed as argument.
* The data doesn't need to be initialized. See omap_serial for
* and example usage.
@@ -108,7 +108,7 @@ int kern_req_phys_map( phys_bytes base_address, vir_bytes io_size,
/*
* Request a physical mapping and put the result in the given prt
- * Note that ptr will only be valid once the callback happend.
+ * Note that ptr will only be valid once the callback happened.
*/
int kern_phys_map_ptr( phys_bytes base_address, vir_bytes io_size,
kern_phys_map * priv, vir_bytes ptr);

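The two header comments above describe a two-phase mapping API: the caller registers a request together with a pointer, and that pointer only becomes valid after the mapping callback has run. A minimal stand-alone sketch of that request/callback pattern follows; the names phys_map_req, request_phys_map and mapping_done are hypothetical stand-ins for illustration, not the MINIX API.

    #include <stdio.h>

    typedef unsigned long phys_bytes;
    typedef unsigned long vir_bytes;

    /* One pending mapping request; statically allocated by the caller,
     * as the header above requires. */
    struct phys_map_req {
        phys_bytes base;
        vir_bytes size;
        vir_bytes *ptr;                 /* filled in once the mapping is done */
        int done;
    };

    /* Phase 1: record the request; the caller's ptr is NOT yet valid. */
    static void request_phys_map(struct phys_map_req *req,
        phys_bytes base, vir_bytes size, vir_bytes *ptr)
    {
        req->base = base;
        req->size = size;
        req->ptr = ptr;
        req->done = 0;
    }

    /* Phase 2: the callback; only now does the caller's ptr become usable. */
    static void mapping_done(struct phys_map_req *req, vir_bytes vaddr)
    {
        *req->ptr = vaddr;
        req->done = 1;
    }

    int main(void)
    {
        static struct phys_map_req req; /* static, as the header asks */
        vir_bytes regs = 0;

        request_phys_map(&req, 0x48020000UL, 0x1000, &regs);
        /* Dereferencing 'regs' here would be exactly the premature use
         * the comment warns about. */
        mapping_done(&req, 0xF0020000UL);
        if (req.done)
            printf("device registers mapped at 0x%lx\n", regs);
        return 0;
    }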

@@ -57,7 +57,7 @@ void mem_clear_mapcache(void)
* The target (i.e. in-kernel) mapping area is one of the freepdes[]
* VM has earlier already told the kernel about that is available. It is
* identified as the 'pde' parameter. This value can be chosen freely
- * by the caller, as long as it is in range (i.e. 0 or higher and corresonds
+ * by the caller, as long as it is in range (i.e. 0 or higher and corresponds
* to a known freepde slot). It is up to the caller to keep track of which
* freepde's are in use, and to determine which ones are free to use.
*
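The freepde comment above (and its earm twin further down) leaves slot bookkeeping to the caller: any in-range pde value is acceptable, but the caller must track which freepdes are in use. A toy sketch of that caller-side bookkeeping, with a hypothetical slot count and helper names:

    #include <assert.h>
    #include <stdio.h>

    #define NR_FREEPDES 4                  /* illustrative, not the MINIX value */

    static int freepde_used[NR_FREEPDES];  /* caller-side bookkeeping */

    /* Pick any free slot; -1 when all are in use. */
    static int freepde_alloc(void)
    {
        for (int i = 0; i < NR_FREEPDES; i++) {
            if (!freepde_used[i]) {
                freepde_used[i] = 1;
                return i;
            }
        }
        return -1;
    }

    static void freepde_release(int pde)
    {
        assert(pde >= 0 && pde < NR_FREEPDES && freepde_used[pde]);
        freepde_used[pde] = 0;
    }

    int main(void)
    {
        int pde = freepde_alloc();
        printf("mapping via freepde slot %d\n", pde);  /* would be the 'pde' arg */
        freepde_release(pde);
        return 0;
    }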
@@ -902,7 +902,7 @@ int kern_phys_map_mapped_ptr(vir_bytes id, phys_bytes address){
/*
* Request a physical mapping and put the result in the given prt
- * Note that ptr will only be valid once the callback happend.
+ * Note that ptr will only be valid once the callback happened.
*/
int kern_phys_map_ptr(
phys_bytes base_address,


@@ -288,7 +288,7 @@ kinfo_t *pre_init(u32_t magic, u32_t ebx)
}
/* pre_init gets executed at the memory location where the kernel was loaded by the boot loader.
- * at that stage we only have a minium set of functionality present (all symbols gets renamed to
+ * at that stage we only have a minimum set of functionality present (all symbols gets renamed to
* ensure this). The following methods are used in that context. Once we jump to kmain they are no
* longer used and the "real" implementations are visible
*/


@@ -110,8 +110,8 @@ EXTERN int ioapic_enabled;
struct io_apic {
unsigned id;
vir_bytes addr; /* presently used address */
- phys_bytes paddr; /* where is it inphys space */
- vir_bytes vaddr; /* adress after paging s on */
+ phys_bytes paddr; /* where is it in phys space */
+ vir_bytes vaddr; /* address after paging is on */
unsigned pins;
unsigned gsi_base;
};


@@ -59,7 +59,7 @@ void mem_clear_mapcache(void)
* The target (i.e. in-kernel) mapping area is one of the freepdes[]
* VM has earlier already told the kernel about that is available. It is
* identified as the 'pde' parameter. This value can be chosen freely
- * by the caller, as long as it is in range (i.e. 0 or higher and corresonds
+ * by the caller, as long as it is in range (i.e. 0 or higher and corresponds
* to a known freepde slot). It is up to the caller to keep track of which
* freepde's are in use, and to determine which ones are free to use.
*


@@ -39,7 +39,7 @@
movl tmp, STREG(pptr)
/*
- * restore kernel segments. %cs is aready set and %fs, %gs are not used */
+ * restore kernel segments. %cs is already set and %fs, %gs are not used */
#define RESTORE_KERNEL_SEGS \
mov $KERN_DS_SELECTOR, %si ;\
mov %si, %ds ;\


@@ -10,7 +10,7 @@ int app_cpu_init_timer(unsigned freq);
int timer_int_handler(void);
int init_local_timer(unsigned freq);
- /* sto p the local timer ticking */
+ /* stop the local timer ticking */
void stop_local_timer(void);
/* let the time tick again with the original settings after it was stopped */
void restart_local_timer(void);


@@ -54,7 +54,7 @@ EXTERN int verboseflags;
#endif
#ifdef USE_APIC
- EXTERN int config_no_apic; /* optionaly turn off apic */
+ EXTERN int config_no_apic; /* optionally turn off apic */
EXTERN int config_apic_timer_x; /* apic timer slowdown factor */
#endif
@@ -64,7 +64,7 @@ EXTERN u64_t cpu_hz[CONFIG_MAX_CPUS];
#define cpu_get_freq(cpu) cpu_hz[cpu]
#ifdef CONFIG_SMP
- EXTERN int config_no_smp; /* optionaly turn off SMP */
+ EXTERN int config_no_smp; /* optionally turn off SMP */
#endif
/* VM */


@@ -8,7 +8,7 @@
* rm_irq_handler: deregister an interrupt handler.
* irq_handle: handle a hardware interrupt.
* called by the system dependent part when an
- * external interrupt occures.
+ * external interrupt occurs.
* enable_irq: enable hook for IRQ.
* disable_irq: disable hook for IRQ.
*/


@@ -192,7 +192,7 @@ void kmain(kinfo_t *local_cbi)
/* Assign privilege structure. Force a static privilege id. */
(void) get_priv(rp, static_priv_id(proc_nr));
- /* Priviliges for kernel tasks. */
+ /* Privileges for kernel tasks. */
if(proc_nr == VM_PROC_NR) {
priv(rp)->s_flags = VM_F;
priv(rp)->s_trap_mask = SRV_T;
@@ -211,7 +211,7 @@ void kmain(kinfo_t *local_cbi)
ipc_to_m = TSK_M; /* allowed targets */
kcalls = TSK_KC; /* allowed kernel calls */
}
- /* Priviliges for the root system process. */
+ /* Privileges for the root system process. */
else {
assert(isrootsysn(proc_nr));
priv(rp)->s_flags= RSYS_F; /* privilege flags */


@@ -117,7 +117,7 @@ void proc_init(void)
struct priv *sp;
int i;
- /* Clear the process table. Anounce each slot as empty and set up
+ /* Clear the process table. Announce each slot as empty and set up
* mappings for proc_addr() and proc_nr() macros. Do the same for the
* table with privilege structures for the system processes.
*/
@@ -175,7 +175,7 @@ static void idle(void)
/* This function is called whenever there is no work to do.
* Halt the CPU, and measure how many timestamp counter ticks are
* spent not doing anything. This allows test setups to measure
- * the CPU utiliziation of certain workloads with high precision.
+ * the CPU utilization of certain workloads with high precision.
*/
p = get_cpulocal_var(proc_ptr) = get_cpulocal_var_ptr(idle_proc);
@@ -637,7 +637,7 @@ endpoint_t src_dst_e; /* src or dst process */
{
/* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
* a cyclic dependency of blocking send and receive calls. The only cyclic
- * depency that is not fatal is if the caller and target directly SEND(REC)
+ * dependency that is not fatal is if the caller and target directly SEND(REC)
* and RECEIVE to each other. If a deadlock is found, the group size is
* returned. Otherwise zero is returned.
*/
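The comment above compresses the whole algorithm: walk the chain of blocked send/receive dependencies starting at the target, and if the chain leads back to the caller, report the size of the cycle, otherwise zero. A compact sketch of that walk over a toy process table (the non-fatal direct SENDREC exception is left out):

    #include <stdio.h>

    #define NR_PROCS 8

    /* blocked_on[p]: the process p is blocked on, or -1 when p is runnable.
     * A toy stand-in for the real proc table links. */
    static int blocked_on[NR_PROCS];

    /* Follow the chain from 'target'; a path back to 'caller' is a deadlock
     * and its length is returned, otherwise 0. */
    static int deadlock_group(int caller, int target)
    {
        int group = 1;                  /* counts the caller itself */
        for (int p = target; p != -1; p = blocked_on[p]) {
            if (p == caller)
                return group;           /* cycle closed: deadlock */
            group++;
            if (group > NR_PROCS)
                break;                  /* guard against corrupt chains */
        }
        return 0;                       /* chain ended: no deadlock */
    }

    int main(void)
    {
        for (int i = 0; i < NR_PROCS; i++)
            blocked_on[i] = -1;
        blocked_on[1] = 2;              /* 1 is blocked on 2 */
        blocked_on[2] = 0;              /* 2 is blocked on 0 */
        /* process 0 now tries to block on 1: 0 -> 1 -> 2 -> 0 */
        printf("deadlock group size: %d\n", deadlock_group(0, 1));  /* 3 */
        return 0;
    }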
@@ -993,7 +993,7 @@ static int mini_receive(struct proc * caller_ptr,
IPC_STATUS_ADD_CALL(caller_ptr, call);
/*
- * if the message is originaly from the kernel on behalf of this
+ * if the message is originally from the kernel on behalf of this
* process, we must send the status flags accordingly
*/
if (sender->p_misc_flags & MF_SENDING_FROM_KERNEL) {


@@ -34,7 +34,7 @@ struct proc {
struct proc *p_scheduler; /* who should get out of quantum msg */
unsigned p_cpu; /* what CPU is the process running on */
#ifdef CONFIG_SMP
- bitchunk_t p_cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* what CPUs is hte
+ bitchunk_t p_cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* what CPUs is the
process allowed to
run on */
bitchunk_t p_stale_tlb[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* On which cpu are
@@ -84,7 +84,7 @@ struct proc {
* memory that isn't present, VM has to fix it. Until it has asked
* what needs to be done and fixed it, save necessary state here.
*
- * The requestor gets a copy of its request message in reqmsg and gets
+ * The requester gets a copy of its request message in reqmsg and gets
* VMREQUEST set.
*/
struct {

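p_cpu_mask above, like the scheduler's cpu_mask at the end of this commit, uses the bitchunk_t/BITMAP_CHUNKS idiom: one bit per CPU, packed into an array of chunks. A self-contained sketch of the idiom; BIT_SET and BIT_ISSET are illustrative helpers, not the MINIX macros:

    #include <limits.h>
    #include <stdio.h>

    typedef unsigned int bitchunk_t;

    #define CONFIG_MAX_CPUS 8
    #define CHUNK_BITS (sizeof(bitchunk_t) * CHAR_BIT)
    #define BITMAP_CHUNKS(nbits) (((nbits) + CHUNK_BITS - 1) / CHUNK_BITS)

    /* Illustrative helpers, not the MINIX macros. */
    #define BIT_SET(map, n)   ((map)[(n) / CHUNK_BITS] |= 1U << ((n) % CHUNK_BITS))
    #define BIT_ISSET(map, n) ((map)[(n) / CHUNK_BITS] &  (1U << ((n) % CHUNK_BITS)))

    int main(void)
    {
        bitchunk_t cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)] = { 0 };

        BIT_SET(cpu_mask, 0);           /* process may run on cpu 0 ... */
        BIT_SET(cpu_mask, 3);           /* ... and on cpu 3 */
        for (unsigned cpu = 0; cpu < CONFIG_MAX_CPUS; cpu++)
            printf("cpu %u: %s\n", cpu,
                BIT_ISSET(cpu_mask, cpu) ? "allowed" : "not allowed");
        return 0;
    }

Rounding up in BITMAP_CHUNKS keeps the mask exactly as many machine words as the CPU count needs, which is why the same pattern appears in both the kernel and the scheduler.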

@@ -44,7 +44,7 @@ void wait_for_APs_to_finish_booting(void)
BKL_UNLOCK();
while (ap_cpus_booted != (n - 1))
arch_pause();
- /* now we have to take the lock again as we continu execution */
+ /* now we have to take the lock again as we continue execution */
BKL_LOCK();
}


@@ -13,7 +13,7 @@
EXTERN unsigned ncpus;
/* Number of virtual strands per physical core */
EXTERN unsigned ht_per_core;
- /* which cpu is bootstraping */
+ /* which cpu is bootstrapping */
EXTERN unsigned bsp_cpu_id;
#define cpu_is_bsp(cpu) (bsp_cpu_id == cpu)
@@ -62,7 +62,7 @@ void smp_ipi_sched_handler(void);
void smp_schedule(unsigned cpu);
/* stop a processes on a different cpu */
void smp_schedule_stop_proc(struct proc * p);
- /* stop a process on a different cpu because its adress space is being changed */
+ /* stop a process on a different cpu because its address space is being changed */
void smp_schedule_vminhibit(struct proc * p);
/* stop the process and for saving its full context */
void smp_schedule_stop_proc_save_ctx(struct proc * p);


@@ -180,7 +180,7 @@ void system_init(void)
}
/* Initialize the call vector to a safe default handler. Some system calls
- * may be disabled or nonexistant. Then explicitely map known calls to their
+ * may be disabled or nonexistant. Then explicitly map known calls to their
* handler functions. This is done with a macro that gives a compile error
* if an illegal call number is used. The ordering is not important here.
*/

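The comment above names a nice defensive technique: fill the call vector with a safe default handler, then bind the known calls through a macro that refuses to compile for an out-of-range call number. A sketch of how such a macro can work, here using C11 _Static_assert (the actual MINIX macro may rely on a different compile-time trick):

    #include <stdio.h>

    #define NR_SYS_CALLS 4

    typedef int (*call_handler_t)(int arg);

    static int do_unused(int arg) { (void)arg; return -1; }  /* safe default */
    static int do_example(int arg) { return arg + 1; }

    static call_handler_t call_vec[NR_SYS_CALLS];

    /* Bind a handler; an out-of-range constant call number fails to compile
     * instead of writing outside the vector. */
    #define map(call_nr, handler) do {                              \
        _Static_assert((call_nr) >= 0 && (call_nr) < NR_SYS_CALLS,  \
            "illegal call number");                                 \
        call_vec[(call_nr)] = (handler);                            \
    } while (0)

    int main(void)
    {
        for (int i = 0; i < NR_SYS_CALLS; i++)
            call_vec[i] = do_unused;    /* safe default everywhere */
        map(1, do_example);             /* explicitly map known calls */
        printf("%d %d\n", call_vec[0](7), call_vec[1](7));  /* -1 8 */
        return 0;
    }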

@@ -58,7 +58,7 @@ int do_copy(struct proc * caller, message * m_ptr)
*/
for (i=_SRC_; i<=_DST_; i++) {
int p;
- /* Check if process number was given implictly with SELF and is valid. */
+ /* Check if process number was given implicitly with SELF and is valid. */
if (vir_addr[i].proc_nr_e == SELF)
vir_addr[i].proc_nr_e = caller->p_endpoint;
if (vir_addr[i].proc_nr_e != NONE) {


@@ -94,7 +94,7 @@ int do_fork(struct proc * caller, message * m_ptr)
/* If the parent is a privileged process, take away the privileges from the
* child process and inhibit it from running by setting the NO_PRIV flag.
- * The caller should explicitely set the new privileges before executing.
+ * The caller should explicitly set the new privileges before executing.
*/
if (priv(rpp)->s_flags & SYS_PROC) {
rpc->p_priv = priv_addr(USER_PRIV_ID);


@@ -38,7 +38,7 @@ int do_privctl(struct proc * caller, message * m_ptr)
struct priv priv;
int irq;
- /* Check whether caller is allowed to make this call. Privileged proceses
+ /* Check whether caller is allowed to make this call. Privileged processes
* can only update the privileges of processes that are inhibited from
* running by the RTS_NO_PRIV flag. This flag is set when a privileged process
* forks.
@@ -221,7 +221,7 @@ int do_privctl(struct proc * caller, message * m_ptr)
return r;
priv(rp)->s_flags |= CHECK_MEM; /* Check memory mappings */
- /* When restarting a driver, check if it already has the premission */
+ /* When restarting a driver, check if it already has the permission */
for (i = 0; i < priv(rp)->s_nr_mem_range; i++) {
if (priv(rp)->s_mem_tab[i].mr_base == mem_range.mr_base &&
priv(rp)->s_mem_tab[i].mr_limit == mem_range.mr_limit)
@@ -253,7 +253,7 @@ int do_privctl(struct proc * caller, message * m_ptr)
KERNEL, (vir_bytes) &irq, sizeof(irq));
priv(rp)->s_flags |= CHECK_IRQ; /* Check IRQs */
- /* When restarting a driver, check if it already has the premission */
+ /* When restarting a driver, check if it already has the permission */
for (i = 0; i < priv(rp)->s_nr_irq; i++) {
if (priv(rp)->s_irq_tab[i] == irq)
return OK;


@@ -28,7 +28,7 @@ int do_vdevio(struct proc * caller, message * m_ptr)
/* Perform a series of device I/O on behalf of a non-kernel process. The
* I/O addresses and I/O values are fetched from and returned to some buffer
* in user space. The actual I/O is wrapped by lock() and unlock() to prevent
- * that I/O batch from being interrrupted.
+ * that I/O batch from being interrupted.
* This is the counterpart of do_devio, which performs a single device I/O.
*/
int vec_size; /* size of vector */


@@ -1,5 +1,5 @@
/*
- * This is arch independent NMI watchdog implementaion part. It is used to
+ * This is arch independent NMI watchdog implementation part. It is used to
* detect kernel lockups and help debugging. each architecture must add its own
* low level code that triggers periodic checks
*/


@@ -35,9 +35,9 @@ void arch_watchdog_stop(void);
/* if the watchdog detects lockup, let the arch code to handle it */
void arch_watchdog_lockup(const struct nmi_frame * frame);
- /* generic NMI handler. Takes one agument which points to where the arch
+ /* generic NMI handler. Takes one argument which points to where the arch
* specific low level handler dumped CPU information and can be inspected by the
- * arch specific code of the watchdog implementaion */
+ * arch specific code of the watchdog implementation */
void nmi_watchdog_handler(struct nmi_frame * frame);
#endif

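nmi_watchdog_handler above receives the CPU state dumped by the arch-specific NMI path; the usual arch-independent lockup test is a heartbeat check. The sketch below assumes (as a simplification) that a healthy kernel keeps a timer tick counter moving between two NMIs:

    #include <stdio.h>

    /* Advanced by the normal timer interrupt; it stops moving when the
     * kernel locks up with interrupts disabled. */
    static volatile unsigned timer_ticks;

    /* Tick count observed at the previous NMI. */
    static unsigned last_seen_ticks;

    /* Called from the periodic NMI, which fires even with normal
     * interrupts masked: a frozen tick count betrays a lockup. */
    static int nmi_watchdog_check(void)
    {
        if (timer_ticks == last_seen_ticks)
            return 1;                   /* no progress since the last NMI */
        last_seen_ticks = timer_ticks;
        return 0;
    }

    int main(void)
    {
        timer_ticks = 42;               /* the timer was ticking */
        printf("lockup: %d\n", nmi_watchdog_check());  /* 0: progress seen */
        printf("lockup: %d\n", nmi_watchdog_check());  /* 1: ticks frozen */
        return 0;
    }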

@@ -30,7 +30,7 @@ EXTERN struct schedproc {
unsigned priority; /* the process' current priority */
unsigned time_slice; /* this process's time slice */
unsigned cpu; /* what CPU is the process running on */
- bitchunk_t cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* what CPUs is hte
+ bitchunk_t cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* what CPUs is the
process allowed
to run on */
} schedproc[NR_PROCS];