Rewrite of process scheduling:

- current and maximum priority per process;
- quantum size and current ticks left per process;
- max number of full quantums in a row allowed
  (otherwise the current priority is decremented)
Jorrit Herder 2005-06-30 15:55:19 +00:00
parent ebd38d9a92
commit bac6068857
15 changed files with 235 additions and 177 deletions
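The three bullets in the commit message map onto new fields in struct proc. The stand-alone sketch below models those fields in user space; the p_* names mirror the members this commit adds, while the queue count and the sample values are illustrative assumptions, not kernel code.

/* Stand-alone model of the per-process scheduling fields added in this
 * commit; the p_* names mirror the new struct proc members, the rest
 * (queue count, sample values) is illustrative only. */
#include <stdio.h>

#define NR_SCHED_QUEUES 8               /* assumed: IDLE_Q (7) plus one */
#define QUANTUMS(q)     (NR_SCHED_QUEUES - (q))

struct sched_fields {
  int p_max_priority;   /* best (lowest-numbered) queue the process may use */
  int p_priority;       /* queue it is currently scheduled in */
  int p_quantum_size;   /* quantum size in clock ticks */
  int p_sched_ticks;    /* ticks left in the current quantum */
  int p_full_quantums;  /* full quantums left before a priority drop */
};

int main(void)
{
  struct sched_fields user = {
    .p_max_priority  = 4,               /* USER_Q */
    .p_priority      = 4,
    .p_quantum_size  = 8,               /* USER_T ticks from table.c */
    .p_sched_ticks   = 8,
    .p_full_quantums = QUANTUMS(4),
  };
  printf("user process: queue %d, quantum %d ticks, %d full quantums in a row\n",
         user.p_priority, user.p_quantum_size, user.p_full_quantums);
  return 0;
}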


@ -38,7 +38,7 @@ clean:
depend:
cd system && $(MAKE) -$(MAKEFLAGS) $@
/usr/bin/mkdep "$(CC) -E $(CPPFLAGS)" *.c system/*.c > .depend
/usr/bin/mkdep "$(CC) -E $(CPPFLAGS)" *.c > .depend
# Include generated dependencies.
include .depend


@ -44,10 +44,6 @@ FORWARD _PROTOTYPE( int clock_handler, (irq_hook_t *hook) );
FORWARD _PROTOTYPE( int do_clocktick, (message *m_ptr) );
/* Constant definitions. */
#define SCHED_RATE (MILLISEC*HZ/1000) /* number of ticks per schedule */
#define MILLISEC 100 /* how often to call the scheduler */
/* Clock parameters. */
#if (CHIP == INTEL)
#define COUNTER_FREQ (2*TIMER_FREQ) /* counter frequency using square wave */
@ -84,8 +80,6 @@ PRIVATE clock_t realtime; /* real time clock */
/* Variables for and changed by the CLOCK's interrupt handler. */
PRIVATE irq_hook_t clock_hook;
PRIVATE int sched_ticks = SCHED_RATE; /* counter: when 0, call scheduler */
PRIVATE struct proc *prev_ptr; /* last user process run by clock */
/*===========================================================================*
@ -147,12 +141,11 @@ message *m_ptr; /* pointer to request message */
TMR_NEVER : clock_timers->tmr_exp_time;
}
/* If a process has been running too long, pick another one. */
if (--sched_ticks <= 0) {
if (bill_ptr == prev_ptr)
lock_sched(USER_Q); /* process has run too long */
sched_ticks = SCHED_RATE; /* reset quantum */
prev_ptr = bill_ptr; /* new previous process */
/* A process used up a full quantum. The interrupt handler stored this
* process in 'prev_ptr'. Reset the quantum and schedule another process.
*/
if (prev_ptr->p_sched_ticks <= 0) {
lock_sched(prev_ptr);
}
/* Inhibit sending a reply. */
@ -183,8 +176,7 @@ irq_hook_t *hook;
* These are used for accounting. It does not matter if proc.c
* is changing them, provided they are always valid pointers,
* since at worst the previous process would be billed.
* next_timeout, realtime, sched_ticks, bill_ptr, prev_ptr
* rdy_head[USER_Q]
* next_timeout, realtime, sched_ticks, bill_ptr, prev_ptr,
* These are tested to decide whether to call notify(). It
* does not matter if the test is sometimes (rarely) backwards
* due to a race, since this will only delay the high-level
@ -211,32 +203,28 @@ irq_hook_t *hook;
/* Acknowledge the PS/2 clock interrupt. */
if (machine.ps_mca) outb(PORT_B, inb(PORT_B) | CLOCK_ACK_BIT);
/* Get number of ticks and update realtime. */
ticks = lost_ticks + 1;
lost_ticks = 0;
realtime += ticks;
/* Update user and system accounting times. Charge the current process for
* user time. If the current process is not billable, that is, if a non-user
* process is running, charge the billable process for system time as well.
* Thus the unbillable process' user time is the billable user's system time.
*/
ticks = lost_ticks + 1;
lost_ticks = 0;
realtime += ticks;
/* Update administration. */
proc_ptr->p_user_time += ticks;
if (proc_ptr != bill_ptr) bill_ptr->p_sys_time += ticks;
if (proc_ptr->p_flags & PREEMPTIBLE) proc_ptr->p_sched_ticks -= ticks;
/* Check if do_clocktick() must be called. Done for alarms and scheduling.
* If bill_ptr == prev_ptr, there are no ready users so don't need sched().
* Some processes, such as the kernel tasks, cannot be preempted.
*/
if (next_timeout <= realtime || (sched_ticks == 1 && bill_ptr == prev_ptr
&& rdy_head[USER_Q] != NIL_PROC))
{
if ((next_timeout <= realtime) || (proc_ptr->p_sched_ticks <= 0)) {
prev_ptr = proc_ptr; /* store running process */
m.NOTIFY_TYPE = HARD_INT;
lock_notify(CLOCK, &m);
lock_notify(CLOCK, &m); /* send event notification */
}
else if (--sched_ticks <= 0) {
sched_ticks = SCHED_RATE; /* reset the quantum */
prev_ptr = bill_ptr; /* new previous process */
}
return(1); /* reenable clock interrupts */
}
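The clock_handler() hunk above charges every tick to the running process, burns quantum credit only for PREEMPTIBLE processes, and notifies the CLOCK task when an alarm expires or the quantum reaches zero. A user-space sketch of that per-tick bookkeeping; the stub types and notify_clock() are stand-ins for the kernel's proc slots and notification path, not kernel API.

/* Per-tick accounting in the spirit of the new clock_handler(); types,
 * flag value and the notify_clock() stub are assumptions for the sketch. */
#include <stdio.h>

#define PREEMPTIBLE 0x01

struct lproc {
  const char *name;
  int flags;            /* static process flags (p_flags) */
  int user_time;        /* p_user_time */
  int sys_time;         /* p_sys_time */
  int sched_ticks;      /* p_sched_ticks: quantum credit left */
};

static void notify_clock(const char *why)
{ printf("notify CLOCK task: %s\n", why); }

/* One clock tick: charge 'running' for user time, bill 'billable' for system
 * time if a non-billable process is running, and burn quantum only when the
 * running process is preemptible. */
static void clock_tick(struct lproc *running, struct lproc *billable,
                       long realtime, long next_timeout)
{
  running->user_time += 1;
  if (running != billable) billable->sys_time += 1;
  if (running->flags & PREEMPTIBLE) running->sched_ticks -= 1;

  if (next_timeout <= realtime || running->sched_ticks <= 0)
      notify_clock(running->sched_ticks <= 0 ? "quantum used up" : "alarm expired");
}

int main(void)
{
  struct lproc shell = { "shell", PREEMPTIBLE, 0, 0, 3 };
  for (long t = 1; t <= 3; t++) clock_tick(&shell, &shell, t, 100);
  printf("%s: user time %d, quantum left %d\n",
         shell.name, shell.user_time, shell.sched_ticks);
  return 0;
}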
@ -246,9 +234,7 @@ irq_hook_t *hook;
*===========================================================================*/
PUBLIC clock_t get_uptime()
{
/* Get and return the current clock uptime in ticks.
* Be careful about pending_ticks.
*/
/* Get and return the current clock uptime in ticks. */
return(realtime);
}


@ -27,6 +27,10 @@
/* How long should the process names be in the kernel? */
#define P_NAME_LEN 8
/* Scheduling quantum. Number of ticks before preemption. */
#define SCHED_MILLISEC 100 /* rate to call scheduler */
#define SCHED_TICKS (SCHED_MILLISEC*HZ/1000) /* ticks per schedule */
/* How many bytes should the circular buffer for kernel diagnostics be? */
#define KMESS_BUF_SIZE 256
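The two new constants just express a 100 ms scheduling interval in clock ticks. Assuming HZ is 60 (the usual value on this platform, but an assumption of this sketch), the arithmetic works out as follows:

/* Worked example for SCHED_TICKS; HZ = 60 is an assumption of the sketch. */
#include <stdio.h>

#define HZ             60                        /* assumed clock frequency */
#define SCHED_MILLISEC 100                       /* rate to call scheduler */
#define SCHED_TICKS    (SCHED_MILLISEC*HZ/1000)  /* ticks per schedule */

int main(void)
{
  printf("SCHED_TICKS = %d*%d/1000 = %d ticks\n", SCHED_MILLISEC, HZ, SCHED_TICKS);
  /* prints: SCHED_TICKS = 100*60/1000 = 6 ticks */
  return 0;
}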


@ -21,10 +21,12 @@ EXTERN struct kmessages kmess; /* diagnostic messages in kernel */
EXTERN struct randomness krandom; /* gather kernel random information */
/* Process scheduling information and the kernel reentry count. */
EXTERN struct proc *prev_ptr; /* previously running process */
EXTERN struct proc *proc_ptr; /* pointer to currently running process */
EXTERN struct proc *next_ptr; /* next process to run after restart() */
EXTERN struct proc *bill_ptr; /* process to bill for clock ticks */
EXTERN char k_reenter; /* kernel reentry count (entry count less 1) */
EXTERN int sched_ticks; /* keep track of quantum usage */
EXTERN unsigned lost_ticks; /* clock ticks counted outside clock task */
/* Declare buffer space and a bit map for notification messages. */


@ -39,7 +39,7 @@ PUBLIC void main()
vir_clicks text_clicks;
vir_clicks data_clicks;
reg_t ktsb; /* kernel task stack base */
struct system_image *ttp;
struct system_image *ip; /* boot image pointer */
struct exec e_hdr; /* for a copy of an a.out header */
/* Initialize the interrupt controller. */
@ -49,7 +49,7 @@ PUBLIC void main()
* for proc_addr() and proc_nr() macros.
*/
for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
rp->p_flags = SLOT_FREE; /* initialize free slot */
rp->p_rts_flags = SLOT_FREE; /* initialize free slot */
rp->p_nr = i; /* proc number from ptr */
(pproc_addr + NR_TASKS)[i] = rp; /* proc ptr from number */
}
@ -66,19 +66,24 @@ PUBLIC void main()
ktsb = (reg_t) t_stack;
for (i=0; i < IMAGE_SIZE; ++i) {
ttp = &image[i]; /* t's task attributes */
rp = proc_addr(ttp->proc_nr); /* t's process slot */
kstrncpy(rp->p_name, ttp->proc_name, P_NAME_LEN); /* set name */
ip = &image[i]; /* t's task attributes */
rp = proc_addr(ip->proc_nr); /* t's process slot */
kstrncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set name */
rp->p_name[P_NAME_LEN-1] = '\0'; /* just for safety */
rp->p_priority = ttp->priority; /* scheduling priority */
rp->p_call_mask = ttp->call_mask; /* allowed system calls */
rp->p_sendmask = ttp->sendmask; /* sendmask protection */
rp->p_flags = ip->flags; /* process flags */
rp->p_max_priority = ip->priority; /* max scheduling priority */
rp->p_priority = ip->priority; /* current priority */
rp->p_quantum_size = ip->quantum; /* quantum size in ticks */
rp->p_sched_ticks = ip->quantum; /* current credit */
rp->p_full_quantums = QUANTUMS(ip->priority); /* quantums left */
rp->p_call_mask = ip->call_mask; /* allowed system calls */
rp->p_sendmask = ip->sendmask; /* sendmask protection */
if (i-NR_TASKS < 0) { /* part of the kernel? */
if (ttp->stksize > 0) { /* HARDWARE stack size is 0 */
if (ip->stksize > 0) { /* HARDWARE stack size is 0 */
rp->p_stguard = (reg_t *) ktsb;
*rp->p_stguard = STACK_GUARD;
}
ktsb += ttp->stksize; /* point to high end of stack */
ktsb += ip->stksize; /* point to high end of stack */
rp->p_reg.sp = ktsb; /* this task's initial stack ptr */
text_base = kinfo.code_base >> CLICK_SHIFT;
/* processes that are in the kernel */
@ -108,7 +113,7 @@ PUBLIC void main()
* is different from that of other processes because tasks can
* access I/O; this is not allowed to less-privileged processes
*/
rp->p_reg.pc = (reg_t) ttp->initial_pc;
rp->p_reg.pc = (reg_t) ip->initial_pc;
rp->p_reg.psw = (iskernelp(rp)) ? INIT_TASK_PSW : INIT_PSW;
/* Initialize the server stack pointer. Take it down one word
@ -125,7 +130,7 @@ PUBLIC void main()
rp->p_ready = 0;
#endif
if (rp->p_nr != HARDWARE) lock_ready(rp);
rp->p_flags = 0;
rp->p_rts_flags = 0;
/* Code and data segments must be allocated in protected mode. */
alloc_segments(rp);
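main() now copies the scheduling attributes of every boot-image entry into the corresponding process slot. A condensed stand-alone model of that copy loop; the trimmed structs, flag values and the sample entry are stand-ins, not the kernel's own definitions.

/* Sketch of how a boot-image entry seeds the new scheduling fields;
 * both structs are simplified stand-ins for system_image and proc. */
#include <stdio.h>
#include <string.h>

#define NR_SCHED_QUEUES 8
#define QUANTUMS(q)     (NR_SCHED_QUEUES - (q))
#define P_NAME_LEN      8

struct image_entry { const char *name; int flags, priority, quantum; };
struct proc_slot {
  char p_name[P_NAME_LEN];
  int  p_flags, p_max_priority, p_priority;
  int  p_quantum_size, p_sched_ticks, p_full_quantums;
};

static void init_slot(struct proc_slot *rp, const struct image_entry *ip)
{
  strncpy(rp->p_name, ip->name, P_NAME_LEN - 1);
  rp->p_name[P_NAME_LEN - 1] = '\0';
  rp->p_flags         = ip->flags;              /* static process flags */
  rp->p_max_priority  = ip->priority;           /* best queue it may use */
  rp->p_priority      = ip->priority;           /* current queue */
  rp->p_quantum_size  = ip->quantum;            /* quantum size in ticks */
  rp->p_sched_ticks   = ip->quantum;            /* current quantum credit */
  rp->p_full_quantums = QUANTUMS(ip->priority); /* full quantums in a row */
}

int main(void)
{
  struct image_entry init_entry = { "INIT", 0x01 | 0x02, 4, 8 }; /* USER_F, USER_Q, USER_T */
  struct proc_slot slot;
  init_slot(&slot, &init_entry);
  printf("%s: queue %d, quantum %d, %d full quantums\n",
         slot.p_name, slot.p_priority, slot.p_quantum_size, slot.p_full_quantums);
  return 0;
}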


@ -386,7 +386,7 @@ _restart:
cmp (_next_ptr), 0 ! see if another process is scheduled
jz 0f
mov eax, (_next_ptr)
mov (_proc_ptr), eax
mov (_proc_ptr), eax ! schedule new process
mov (_next_ptr), 0
0: mov esp, (_proc_ptr) ! will assume P_STACKBASE == 0
lldt P_LDT_SEL(esp) ! enable process' segment descriptors
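The change to _restart only adds a comment; the underlying logic is that a pending next_ptr, set by pick_proc(), replaces proc_ptr just before the context is restored. A C rendering of the assembly's effect (a sketch, not kernel code):

/* C rendering of what the _restart prologue does with next_ptr/proc_ptr. */
#include <stdio.h>

struct procq { const char *p_name; };

static struct procq pm = { "PM" }, tty = { "TTY" };
static struct procq *proc_ptr = &pm;     /* currently running process */
static struct procq *next_ptr = &tty;    /* set by pick_proc(), or NULL */

static void restart(void)
{
  if (next_ptr != NULL) {        /* another process is scheduled */
      proc_ptr = next_ptr;       /* schedule new process */
      next_ptr = NULL;
  }
  printf("resuming %s\n", proc_ptr->p_name);  /* then restore its context */
}

int main(void) { restart(); return 0; }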


@ -42,6 +42,7 @@
#include <minix/callnr.h>
#include <minix/com.h>
#include "proc.h"
#include "const.h"
#include "debug.h"
#include "ipc.h"
#include "sendmask.h"
@ -60,7 +61,7 @@ FORWARD _PROTOTYPE( int mini_notify, (struct proc *caller_ptr, int dst,
FORWARD _PROTOTYPE( void ready, (struct proc *rp) );
FORWARD _PROTOTYPE( void unready, (struct proc *rp) );
FORWARD _PROTOTYPE( void sched, (int queue) );
FORWARD _PROTOTYPE( void sched, (struct proc *rp) );
FORWARD _PROTOTYPE( void pick_proc, (void) );
#define BuildMess(m,n) \
@ -107,7 +108,7 @@ message *m_ptr; /* pointer to message in the caller's space */
* if the caller doesn't do receive().
*/
if (! (caller_ptr->p_call_mask & (1 << function)) ||
(iskerneltask(src_dst) && function != SENDREC))
(iskerneln(src_dst) && function != SENDREC))
return(ECALLDENIED);
/* Require a valid source and/ or destination process, unless echoing. */
@ -146,7 +147,7 @@ message *m_ptr; /* pointer to message in the caller's space */
case SENDREC: /* has FRESH_ANSWER flag */
/* fall through */
case SEND:
if (isemptyp(proc_addr(src_dst))) {
if (isemptyn(src_dst)) {
result = EDEADDST; /* cannot send to the dead */
break;
}
@ -179,6 +180,16 @@ message *m_ptr; /* pointer to message in the caller's space */
default:
result = EBADCALL; /* illegal system call */
}
/* If the caller made a successful, blocking system call, its priority may
* be raised. The priority may have been lowered if the process consumed too
* many full quantums in a row, to prevent damage from infinite loops.
*/
if ((caller_ptr->p_priority > caller_ptr->p_max_priority) &&
! (flags & NON_BLOCKING) && (result == OK)) {
caller_ptr->p_priority = caller_ptr->p_max_priority;
caller_ptr->p_full_quantums = QUANTUMS(caller_ptr->p_priority);
}
/* Now, return the result of the system call to the caller. */
return(result);
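The new block at the end of sys_call() undoes an earlier demotion: a process that completes a successful blocking call gets its priority reset to p_max_priority and a fresh row of full quantums. A small stand-alone sketch of that rule; the flag value and helper function are assumptions of the sketch.

/* Sketch of the priority-restore rule added to sys_call(). */
#include <stdio.h>

#define OK               0
#define NON_BLOCKING  0x01              /* assumed flag value for the sketch */
#define NR_SCHED_QUEUES  8
#define QUANTUMS(q)      (NR_SCHED_QUEUES - (q))

struct sproc { int p_priority, p_max_priority, p_full_quantums; };

static void restore_priority(struct sproc *caller, unsigned flags, int result)
{
  /* Only a successful, blocking call earns the old priority back. */
  if (caller->p_priority > caller->p_max_priority &&
      !(flags & NON_BLOCKING) && result == OK) {
      caller->p_priority = caller->p_max_priority;
      caller->p_full_quantums = QUANTUMS(caller->p_priority);
  }
}

int main(void)
{
  struct sproc p = { 6, 4, 1 };         /* demoted from queue 4 to queue 6 */
  restore_priority(&p, 0, OK);          /* blocking call that succeeded */
  printf("back in queue %d with %d full quantums\n",
         p.p_priority, p.p_full_quantums);
  return 0;
}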
@ -204,7 +215,7 @@ unsigned flags; /* system call flags */
/* Check for deadlock by 'caller_ptr' and 'dst' sending to each other. */
xp = dst_ptr;
while (xp->p_flags & SENDING) { /* check while sending */
while (xp->p_rts_flags & SENDING) { /* check while sending */
xp = proc_addr(xp->p_sendto); /* get xp's destination */
if (xp == caller_ptr) return(ELOCKED); /* deadlock if cyclic */
}
@ -212,17 +223,17 @@ unsigned flags; /* system call flags */
/* Check if 'dst' is blocked waiting for this message. The destination's
* SENDING flag may be set when its SENDREC call blocked while sending.
*/
if ( (dst_ptr->p_flags & (RECEIVING | SENDING)) == RECEIVING &&
if ( (dst_ptr->p_rts_flags & (RECEIVING | SENDING)) == RECEIVING &&
(dst_ptr->p_getfrom == ANY || dst_ptr->p_getfrom == caller_ptr->p_nr)) {
/* Destination is indeed waiting for this message. */
CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr,
dst_ptr->p_messbuf);
if ((dst_ptr->p_flags &= ~RECEIVING) == 0) ready(dst_ptr);
if ((dst_ptr->p_rts_flags &= ~RECEIVING) == 0) ready(dst_ptr);
} else if ( ! (flags & NON_BLOCKING)) {
/* Destination is not waiting. Block and queue caller. */
caller_ptr->p_messbuf = m_ptr;
if (caller_ptr->p_flags == 0) unready(caller_ptr);
caller_ptr->p_flags |= SENDING;
if (caller_ptr->p_rts_flags == 0) unready(caller_ptr);
caller_ptr->p_rts_flags |= SENDING;
caller_ptr->p_sendto = dst;
/* Process is now blocked. Put in on the destination's queue. */
@ -258,7 +269,7 @@ unsigned flags; /* system call flags */
* The caller's SENDING flag may be set if SENDREC couldn't send. If it is
* set, the process should be blocked.
*/
if (!(caller_ptr->p_flags & SENDING)) {
if (!(caller_ptr->p_rts_flags & SENDING)) {
/* Check caller queue. Use pointer pointers to keep code simple. */
xpp = &caller_ptr->p_caller_q;
@ -266,7 +277,7 @@ unsigned flags; /* system call flags */
if (src == ANY || src == proc_nr(*xpp)) {
/* Found acceptable message. Copy it and update status. */
CopyMess((*xpp)->p_nr, *xpp, (*xpp)->p_messbuf, caller_ptr, m_ptr);
if (((*xpp)->p_flags &= ~SENDING) == 0) ready(*xpp);
if (((*xpp)->p_rts_flags &= ~SENDING) == 0) ready(*xpp);
*xpp = (*xpp)->p_q_link; /* remove from queue */
return(OK); /* report success */
}
@ -300,8 +311,8 @@ unsigned flags; /* system call flags */
if ( ! (flags & NON_BLOCKING)) {
caller_ptr->p_getfrom = src;
caller_ptr->p_messbuf = m_ptr;
if (caller_ptr->p_flags == 0) unready(caller_ptr);
caller_ptr->p_flags |= RECEIVING;
if (caller_ptr->p_rts_flags == 0) unready(caller_ptr);
caller_ptr->p_rts_flags |= RECEIVING;
return(OK);
} else {
return(ENOTREADY);
@ -326,14 +337,14 @@ message *m_ptr; /* pointer to message buffer */
/* Check to see if target is blocked waiting for this message. A process
* can be both sending and receiving during a SENDREC system call.
*/
if ( (dst_ptr->p_flags & (RECEIVING|SENDING)) == RECEIVING &&
if ( (dst_ptr->p_rts_flags & (RECEIVING|SENDING)) == RECEIVING &&
(dst_ptr->p_getfrom == ANY || dst_ptr->p_getfrom == caller_ptr->p_nr)) {
/* Destination is indeed waiting for this message. */
CopyMess(proc_nr(caller_ptr), caller_ptr, m_ptr,
dst_ptr, dst_ptr->p_messbuf);
dst_ptr->p_flags &= ~RECEIVING; /* deblock destination */
if (dst_ptr->p_flags == 0) ready(dst_ptr);
dst_ptr->p_rts_flags &= ~RECEIVING; /* deblock destination */
if (dst_ptr->p_rts_flags == 0) ready(dst_ptr);
return(OK);
}
@ -404,33 +415,6 @@ message *m_ptr; /* pointer to message buffer */
}
/*===========================================================================*
* pick_proc *
*===========================================================================*/
PRIVATE void pick_proc()
{
/* Decide who to run now. A new process is selected by setting 'next_ptr'.
* When a fresh user (or idle) process is selected, record it in 'bill_ptr',
* so the clock task can tell who to bill for system time.
*/
register struct proc *rp; /* process to run */
int q; /* iterate over queues */
/* Check each of the scheduling queues for ready processes. The number of
* queues is defined in proc.h, and priorities are set in the task table.
* The lowest queue contains IDLE, which is always ready.
*/
for (q=0; q < NR_SCHED_QUEUES; q++) {
if ( (rp = rdy_head[q]) != NIL_PROC) {
next_ptr = rp; /* run process 'rp' next */
if (isuserp(rp) || isidlep(rp)) /* possible bill 'rp' */
bill_ptr = rp;
return;
}
}
}
/*===========================================================================*
* ready *
*===========================================================================*/
@ -455,7 +439,7 @@ register struct proc *rp; /* this process is now runnable */
rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */
rp->p_nextready = NIL_PROC; /* mark new end */
}
else if (isuserp(rp)) { /* add to head of queue */
else if (rp->p_flags & SCHED_Q_HEAD) { /* add to head of queue */
rp->p_nextready = rdy_head[q]; /* chain head of queue */
rdy_head[q] = rp; /* set new queue head */
}
@ -464,13 +448,7 @@ register struct proc *rp; /* this process is now runnable */
rdy_tail[q] = rp; /* set new queue tail */
rp->p_nextready = NIL_PROC; /* mark new end */
}
/* Run 'rp' next if it has a higher priority than 'proc_ptr' or 'next_ptr'.
* This actually should be done via pick_proc(), but the message passing
* functions rely on this side-effect. High priorities have a lower number.
*/
if (next_ptr && next_ptr->p_priority > rp->p_priority) next_ptr = rp;
else if (proc_ptr->p_priority > rp->p_priority) next_ptr = rp;
pick_proc(); /* select next to run */
}
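ready() now consults the new SCHED_Q_HEAD flag instead of isuserp() to decide whether a process enters its queue at the head or at the tail, and then calls pick_proc() instead of adjusting next_ptr directly. A minimal singly linked queue mirroring the head-or-tail choice; every name here is a simplified stand-in.

/* Head-or-tail enqueue as in the reworked ready(); stand-in types only. */
#include <stdio.h>

#define SCHED_Q_HEAD 0x02

struct qproc { const char *name; int flags; struct qproc *next; };
static struct qproc *head = NULL, *tail = NULL;

static void enqueue(struct qproc *rp)
{
  if (head == NULL) {                    /* empty: create a new queue */
      head = tail = rp; rp->next = NULL;
  } else if (rp->flags & SCHED_Q_HEAD) { /* add to head of queue */
      rp->next = head; head = rp;
  } else {                               /* add to tail of queue */
      tail->next = rp; tail = rp; rp->next = NULL;
  }
}

int main(void)
{
  struct qproc a = { "driver", 0 }, b = { "shell", SCHED_Q_HEAD }, c = { "daemon", 0 };
  enqueue(&a); enqueue(&b); enqueue(&c);
  for (struct qproc *p = head; p != NULL; p = p->next) printf("%s ", p->name);
  printf("\n");                          /* prints: shell driver daemon */
  return 0;
}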
/*===========================================================================*
@ -520,22 +498,89 @@ register struct proc *rp; /* this process is no longer runnable */
/*===========================================================================*
* sched *
*===========================================================================*/
PRIVATE void sched(q)
int q; /* scheduling queue to use */
PRIVATE void sched(sched_ptr)
struct proc *sched_ptr; /* quantum eating process */
{
/* The current process has run too long. If another low priority (user)
* process is runnable, put the current process on the end of the user queue,
* possibly promoting another user to head of the queue.
int q;
/* Check if this process is preemptible, otherwise leave it as is. */
if (! (sched_ptr->p_flags & PREEMPTIBLE)) {
kprintf("Warning, sched for nonpreemptible proc %d\n", sched_ptr->p_nr);
return;
}
if (sched_ptr->p_nr == IS_PROC_NR) {
kprintf("Scheduling IS: pri: %d, ", sched_ptr->p_priority);
kprintf("qua %d", sched_ptr->p_full_quantums);
}
/* Process exceeded the maximum number of full quantums it is allowed
* to use in a row. Lower the process' priority, but make sure we don't
* end up in the IDLE queue. This helps to limit the damage caused by
* for example infinite loops in high-priority processes.
* This is a rare situation, so the overhead is acceptable.
*/
if (-- sched_ptr->p_full_quantums <= 0) { /* exceeded threshold */
if (sched_ptr->p_priority + 1 < IDLE_Q ) {
unready(sched_ptr); /* remove from queues */
sched_ptr->p_priority ++; /* lower priority */
ready(sched_ptr); /* add to new queue */
kprintf("Warning, proc %d got lower priority:\n", sched_ptr->p_nr);
kprintf("%d\n", sched_ptr->p_priority);
}
sched_ptr->p_full_quantums = QUANTUMS(sched_ptr->p_priority);
}
/* The current process has run too long. If another low priority (user)
* process is runnable, put the current process on the tail of its queue,
* possibly promoting another user to head of the queue. Don't do anything
* if the queue is empty, or the process to be scheduled is not the head.
*/
q = sched_ptr->p_priority; /* convenient shorthand */
if (rdy_head[q] == sched_ptr) {
rdy_tail[q]->p_nextready = rdy_head[q]; /* add expired to end */
rdy_tail[q] = rdy_head[q]; /* set new queue tail */
rdy_head[q] = rdy_head[q]->p_nextready; /* set new queue head */
rdy_tail[q]->p_nextready = NIL_PROC; /* mark new queue end */
}
/* Give the expired process a new quantum and see who is next to run. */
sched_ptr->p_sched_ticks = sched_ptr->p_quantum_size;
pick_proc();
if (sched_ptr->p_nr == IS_PROC_NR) {
kprintf("Next proc: %d, ", next_ptr->p_nr);
kprintf("pri: %d, ", next_ptr->p_priority);
kprintf("qua: %d\n", next_ptr->p_full_quantums);
}
}
/*===========================================================================*
* pick_proc *
*===========================================================================*/
PRIVATE void pick_proc()
{
/* Decide who to run now. A new process is selected by setting 'next_ptr'.
* When a billable process is selected, record it in 'bill_ptr', so that the
* clock task can tell who to bill for system time.
*/
if (rdy_head[q] == NIL_PROC) return; /* return for empty queue */
register struct proc *rp; /* process to run */
int q; /* iterate over queues */
/* One or more user processes queued. */
rdy_tail[q]->p_nextready = rdy_head[q]; /* add expired to end */
rdy_tail[q] = rdy_head[q]; /* set new queue tail */
rdy_head[q] = rdy_head[q]->p_nextready; /* set new queue head */
rdy_tail[q]->p_nextready = NIL_PROC; /* mark new queue end */
pick_proc(); /* select next to run */
/* Check each of the scheduling queues for ready processes. The number of
* queues is defined in proc.h, and priorities are set in the task table.
* The lowest queue contains IDLE, which is always ready.
*/
for (q=0; q < NR_SCHED_QUEUES; q++) {
if ( (rp = rdy_head[q]) != NIL_PROC) {
next_ptr = rp; /* run process 'rp' next */
if (rp->p_flags & BILLABLE)
bill_ptr = rp; /* bill for system time */
return;
}
}
}
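Taken together, the new sched() and pick_proc() implement round robin inside each priority queue plus a demotion rule: a process that burns QUANTUMS(q) full quantums in a row drops one queue, but never into IDLE_Q. The following self-contained simulation reproduces that policy on plain linked lists; the queue count, the process set and the scheduling loop are demo assumptions, not the kernel's code.

/* User-space simulation of the new sched()/pick_proc() policy: round robin
 * within a priority queue, plus demotion by one queue after QUANTUMS(q)
 * full quantums in a row. All concrete values are demo choices. */
#include <stdio.h>
#include <stddef.h>

#define NR_SCHED_QUEUES 8
#define IDLE_Q          7
#define QUANTUMS(q)     (NR_SCHED_QUEUES - (q))

struct sim_proc {
  const char *name;
  int priority;                         /* current queue, lower is better */
  int full_quantums;                    /* full quantums left before a drop */
  struct sim_proc *next;
};

static struct sim_proc *rdy_head[NR_SCHED_QUEUES];

static void enqueue_tail(struct sim_proc *rp)
{
  struct sim_proc **xpp = &rdy_head[rp->priority];
  while (*xpp != NULL) xpp = &(*xpp)->next;
  rp->next = NULL;
  *xpp = rp;
}

static void unqueue(struct sim_proc *rp)
{
  struct sim_proc **xpp = &rdy_head[rp->priority];
  while (*xpp != rp) xpp = &(*xpp)->next;
  *xpp = rp->next;
}

/* pick_proc(): the head of the highest-priority non-empty queue runs next. */
static struct sim_proc *pick_proc(void)
{
  for (int q = 0; q < NR_SCHED_QUEUES; q++)
      if (rdy_head[q] != NULL) return rdy_head[q];
  return NULL;                          /* cannot happen while IDLE exists */
}

/* sched(): called when 'rp' has used up a full quantum. */
static void sched(struct sim_proc *rp)
{
  /* Too many full quantums in a row: drop one queue, but never into IDLE_Q. */
  if (--rp->full_quantums <= 0) {
      if (rp->priority + 1 < IDLE_Q) {
          unqueue(rp);
          rp->priority++;
          enqueue_tail(rp);
      }
      rp->full_quantums = QUANTUMS(rp->priority);
  }
  /* Round robin: if the expired process heads its queue, move it to the tail. */
  if (rdy_head[rp->priority] == rp && rp->next != NULL) {
      unqueue(rp);
      enqueue_tail(rp);
  }
}

int main(void)
{
  struct sim_proc looper = { "looper", 4, QUANTUMS(4) };
  struct sim_proc shell  = { "shell",  4, QUANTUMS(4) };
  enqueue_tail(&looper);
  enqueue_tail(&shell);

  for (int quantum = 1; quantum <= 10; quantum++) {
      struct sim_proc *rp = pick_proc();
      if (rp == NULL) break;
      printf("quantum %2d: run %-6s (queue %d, %d full quantums left)\n",
             quantum, rp->name, rp->priority, rp->full_quantums);
      sched(rp);                        /* the quantum expires without blocking */
  }
  return 0;
}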
@ -582,12 +627,12 @@ struct proc *rp; /* this process is no longer runnable */
/*==========================================================================*
* lock_sched *
*==========================================================================*/
PUBLIC void lock_sched(queue)
int queue;
PUBLIC void lock_sched(sched_ptr)
struct proc *sched_ptr;
{
/* Safe gateway to sched() for tasks. */
lock(5, "sched");
sched(queue);
sched(sched_ptr);
unlock(5);
}


@ -27,19 +27,21 @@ struct proc {
reg_t *p_stguard; /* stack guard word */
proc_nr_t p_nr; /* number of this process (for fast access) */
char p_flags; /* PREEMTIBLE, BILLABLE, etc. */
char p_rts_flags; /* SENDING, RECEIVING, etc. */
struct mem_map p_memmap[NR_LOCAL_SEGS]; /* local memory map (T, D, S) */
struct far_mem p_farmem[NR_REMOTE_SEGS]; /* remote memory map */
char p_flags; /* SENDING, RECEIVING, etc. */
char p_priority; /* current scheduling priority */
char p_max_priority; /* maximum (default) scheduling priority */
char p_used_quantums; /* number of full quantums used in a row */
char p_allowed_quantums; /* maximum quantums allowed in a row */
char p_max_priority; /* maximum scheduling priority */
char p_quantum_size; /* quantum size in ticks */
char p_sched_ticks; /* number of scheduling ticks left */
char p_full_quantums; /* number of full quantums left */
char p_call_mask; /* bit map with allowed system call traps */
send_mask_t p_sendmask; /* mask indicating to whom proc may send */
struct mem_map p_memmap[NR_LOCAL_SEGS]; /* local memory map (T, D, S) */
struct far_mem p_farmem[NR_REMOTE_SEGS]; /* remote memory map */
clock_t p_user_time; /* user time in ticks */
clock_t p_sys_time; /* sys time in ticks */
@ -64,7 +66,7 @@ struct proc {
/* Guard word for task stacks. */
#define STACK_GUARD ((reg_t) (sizeof(reg_t) == 2 ? 0xBEEF : 0xDEADBEEF))
/* Bits for the process flags. A process is runnable iff p_flags == 0. */
/* Bits for the runtime flags. A process is runnable iff p_rts_flags == 0. */
#define SLOT_FREE 0x01 /* process slot is free */
#define NO_MAP 0x02 /* keeps unmapped forked child from running */
#define SENDING 0x04 /* process blocked trying to SEND */
@ -73,6 +75,10 @@ struct proc {
#define SIG_PENDING 0x20 /* unready while signal being processed */
#define P_STOP 0x40 /* set when process is being traced */
/* Bits for the other process flags. */
#define PREEMPTIBLE 0x01 /* kernel tasks are not preemptible */
#define SCHED_Q_HEAD 0x02 /* add to queue head instead of tail */
#define BILLABLE 0x04 /* system services are not billable */
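This hunk captures the central renaming of the commit: the old p_flags becomes p_rts_flags (run-time status bits; a process is runnable iff the field is zero), while a new p_flags holds static properties such as PREEMPTIBLE, SCHED_Q_HEAD and BILLABLE. A tiny illustration of how the two fields are meant to be tested, using a stand-in struct rather than the kernel's.

/* The runnable test uses p_rts_flags; scheduling behavior uses p_flags. */
#include <stdio.h>

/* run-time status bits (p_rts_flags); runnable iff the whole field is 0 */
#define SENDING      0x04
#define RECEIVING    0x08
/* static process flags (p_flags) */
#define PREEMPTIBLE  0x01
#define SCHED_Q_HEAD 0x02
#define BILLABLE     0x04

struct fproc { const char *name; int p_rts_flags; int p_flags; };

int main(void)
{
  struct fproc user = { "user", RECEIVING, PREEMPTIBLE | SCHED_Q_HEAD };
  printf("%s runnable: %s\n", user.name, user.p_rts_flags == 0 ? "yes" : "no");
  user.p_rts_flags &= ~RECEIVING;        /* message arrived: deblock */
  printf("%s runnable: %s, preemptible: %s\n", user.name,
         user.p_rts_flags == 0 ? "yes" : "no",
         (user.p_flags & PREEMPTIBLE) ? "yes" : "no");
  return 0;
}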
/* Scheduling priorities for p_priority. Values must start at zero (highest
* priority) and increment. Priorities of the processes in the boot image can
@ -83,6 +89,8 @@ struct proc {
#define USER_Q 4 /* default priority for user processes */
#define IDLE_Q 7 /* lowest, only IDLE process goes here */
/* Each queue has a maximum number of full quantums associated with it. */
#define QUANTUMS(q) (NR_SCHED_QUEUES - (q))
/* Magic process table addresses. */
#define BEG_PROC_ADDR (&proc[0])
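QUANTUMS(q) ties the demotion threshold to the queue: the deeper (higher numbered) the queue, the fewer consecutive full quantums a process may burn there. The worked numbers below assume NR_SCHED_QUEUES is 8, that is IDLE_Q plus one, which is an assumption of this sketch.

/* Demotion threshold per queue; NR_SCHED_QUEUES = 8 is assumed here. */
#include <stdio.h>

#define NR_SCHED_QUEUES 8
#define USER_Q          4
#define IDLE_Q          7
#define QUANTUMS(q)     (NR_SCHED_QUEUES - (q))

int main(void)
{
  printf("driver queue 2: %d full quantums in a row\n", QUANTUMS(2));           /* 6 */
  printf("USER_Q (%d):     %d full quantums in a row\n", USER_Q, QUANTUMS(USER_Q)); /* 4 */
  printf("IDLE_Q (%d):     %d full quantums in a row\n", IDLE_Q, QUANTUMS(IDLE_Q)); /* 1 */
  return 0;
}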
@ -94,14 +102,14 @@ struct proc {
#define proc_addr(n) (pproc_addr + NR_TASKS)[(n)]
#define proc_nr(p) ((p)->p_nr)
#define iskerneltask(n) ((n) == CLOCK || (n) == SYSTASK)
#define isokprocn(n) ((unsigned) ((n) + NR_TASKS) < NR_PROCS + NR_TASKS)
#define isokprocp(p) ((p) >= BEG_PROC_ADDR && (p) < END_PROC_ADDR)
#define isemptyn(n) isemptyp(proc_addr(n))
#define isemptyp(p) ((p)->p_rts_flags == SLOT_FREE)
#define iskernelp(p) iskerneln((p)->p_nr)
#define iskerneln(n) ((n) < 0)
#define isuserp(p) isusern((p)->p_nr)
#define isusern(n) ((n) >= 0)
#define iskernelp(p) ((p)->p_nr < 0)
#define isuserp(p) ((p)->p_nr >= 0)
#define isidlep(p) ((p)->p_nr == IDLE)
#define isemptyp(p) ((p)->p_flags == SLOT_FREE)
/* The process table and pointers to process table slots. The pointers allow
* faster access because now a process entry can be found by indexing the


@ -43,7 +43,7 @@ _PROTOTYPE( int sys_call, (int function, int src_dest, message *m_ptr) );
_PROTOTYPE( int lock_notify, (int dst, message *m_ptr) );
_PROTOTYPE( int lock_send, (int dst, message *m_ptr) );
_PROTOTYPE( void lock_ready, (struct proc *rp) );
_PROTOTYPE( void lock_sched, (int queue) );
_PROTOTYPE( void lock_sched, (struct proc *rp) );
_PROTOTYPE( void lock_unready, (struct proc *rp) );
/* start.c */


@ -186,14 +186,14 @@ int proc_nr; /* slot of process to clean up */
reset_timer(&rc->p_alarm_timer);
/* Make sure the exiting process is no longer scheduled. */
if (rc->p_flags == 0) lock_unready(rc);
if (rc->p_rts_flags == 0) lock_unready(rc);
/* If the process being terminated happens to be queued trying to send a
* message (e.g., the process was killed by a signal, rather than it doing
* an exit or it is forcibly shutdown in the stop sequence), then it must
* be removed from the message queues.
*/
if (rc->p_flags & SENDING) {
if (rc->p_rts_flags & SENDING) {
/* Check all proc slots to see if the exiting process is queued. */
for (rp = BEG_PROC_ADDR; rp < END_PROC_ADDR; rp++) {
if (rp->p_caller_q == NIL_PROC) continue;
@ -225,7 +225,7 @@ int proc_nr; /* slot of process to clean up */
/* Now clean up the process table entry. Reset to defaults. */
kstrncpy(rc->p_name, "<none>", P_NAME_LEN); /* unset name */
sigemptyset(&rc->p_pending); /* remove pending signals */
rc->p_flags = SLOT_FREE; /* announce slot empty */
rc->p_rts_flags = SLOT_FREE; /* announce slot empty */
rc->p_sendmask = DENY_ALL_MASK; /* set most restrictive mask */
#if (CHIP == M68000)
@ -299,13 +299,12 @@ int sig_nr; /* signal to be sent, 1 to _NSIG */
rp = proc_addr(proc_nr);
if (! sigismember(&rp->p_pending, sig_nr)) {
sigaddset(&rp->p_pending, sig_nr);
if (rp->p_flags & SIGNALED) return; /* other signal pending */
if (rp->p_flags == 0) lock_unready(rp); /* unready if not yet done */
rp->p_flags |= SIGNALED | SIG_PENDING; /* update signal flags */
m.NOTIFY_TYPE = KSIG_PENDING;
m.NOTIFY_ARG = 0;
m.NOTIFY_FLAGS = 0;
lock_notify(PM_PROC_NR, &m);
if (! (rp->p_rts_flags & SIGNALED)) { /* other pending */
if (rp->p_rts_flags == 0) lock_unready(rp); /* make not ready */
rp->p_rts_flags |= SIGNALED | SIG_PENDING; /* update flags */
m.NOTIFY_TYPE = KSIG_PENDING;
lock_notify(PM_PROC_NR, &m);
}
}
}


@ -49,8 +49,8 @@ register message *m_ptr; /* pointer to request message */
rpc->p_ntf_q = NULL; /* remove pending notifications */
/* Only one in group should have SIGNALED, child doesn't inherit tracing. */
rpc->p_flags |= NO_MAP; /* inhibit process from running */
rpc->p_flags &= ~(SIGNALED | SIG_PENDING | P_STOP);
rpc->p_rts_flags |= NO_MAP; /* inhibit process from running */
rpc->p_rts_flags &= ~(SIGNALED | SIG_PENDING | P_STOP);
sigemptyset(&rpc->p_pending);
rpc->p_reg.retreg = 0; /* child sees pid = 0 to know it is child */
@ -103,9 +103,9 @@ message *m_ptr; /* pointer to request message */
#else
pmmu_init_proc(rp);
#endif
old_flags = rp->p_flags; /* save the previous value of the flags */
rp->p_flags &= ~NO_MAP;
if (old_flags != 0 && rp->p_flags == 0) lock_ready(rp);
old_flags = rp->p_rts_flags; /* save the previous value of the flags */
rp->p_rts_flags &= ~NO_MAP;
if (old_flags != 0 && rp->p_rts_flags == 0) lock_ready(rp);
return(OK);
}
@ -151,8 +151,8 @@ register message *m_ptr; /* pointer to request message */
(LDT_SIZE - EXTRA_LDT_INDEX) * sizeof(rp->p_ldt[0]));
#endif
rp->p_reg.pc = (reg_t) m_ptr->PR_IP_PTR; /* set pc */
rp->p_flags &= ~RECEIVING; /* PM does not reply to EXEC call */
if (rp->p_flags == 0) lock_ready(rp);
rp->p_rts_flags &= ~RECEIVING; /* PM does not reply to EXEC call */
if (rp->p_rts_flags == 0) lock_ready(rp);
/* Save command name for debugging, ps(1) output, etc. */
phys_name = numap_local(m_ptr->m_source, (vir_bytes) m_ptr->PR_NAME_PTR,


@ -33,11 +33,11 @@ message *m_ptr; /* pointer to request message */
/* Find the next process with pending signals. */
for (rp = BEG_USER_ADDR; rp < END_PROC_ADDR; rp++) {
if (rp->p_flags & SIGNALED) {
if (rp->p_rts_flags & SIGNALED) {
m_ptr->SIG_PROC = rp->p_nr;
m_ptr->SIG_MAP = rp->p_pending;
sigemptyset(&rp->p_pending); /* ball is in PM's court */
rp->p_flags &= ~SIGNALED; /* blocked by SIG_PENDING */
rp->p_rts_flags &= ~SIGNALED; /* blocked by SIG_PENDING */
return(OK);
}
}
@ -60,8 +60,8 @@ message *m_ptr; /* pointer to request message */
if (isemptyp(rp)) return(EINVAL); /* process already dead? */
/* PM has finished one kernel signal. Perhaps process is ready now? */
if (! (rp->p_flags & SIGNALED)) /* new signal arrived */
if ((rp->p_flags &= ~SIG_PENDING) == 0) /* remove pending flag */
if (! (rp->p_rts_flags & SIGNALED)) /* new signal arrived */
if ((rp->p_rts_flags &= ~SIG_PENDING)==0) /* remove pending flag */
lock_ready(rp); /* ready if no flags */
return(OK);
}


@ -51,8 +51,8 @@ register message *m_ptr;
if (isemptyp(rp)) return(EIO);
switch (tr_request) {
case T_STOP: /* stop process */
if (rp->p_flags == 0) lock_unready(rp);
rp->p_flags |= P_STOP;
if (rp->p_rts_flags == 0) lock_unready(rp);
rp->p_rts_flags |= P_STOP;
rp->p_reg.psw &= ~TRACEBIT; /* clear trace bit */
return(OK);
@ -120,15 +120,15 @@ register message *m_ptr;
break;
case T_RESUME: /* resume execution */
rp->p_flags &= ~P_STOP;
if (rp->p_flags == 0) lock_ready(rp);
rp->p_rts_flags &= ~P_STOP;
if (rp->p_rts_flags == 0) lock_ready(rp);
tr_data = 0;
break;
case T_STEP: /* set trace bit */
rp->p_reg.psw |= TRACEBIT;
rp->p_flags &= ~P_STOP;
if (rp->p_flags == 0) lock_ready(rp);
rp->p_rts_flags &= ~P_STOP;
if (rp->p_rts_flags == 0) lock_ready(rp);
tr_data = 0;
break;


@ -39,17 +39,17 @@
/* Define stack sizes for all tasks included in the system image. */
#define NO_STACK 0
#define SMALL_STACK (128 * sizeof(char *))
#if (CHIP == INTEL)
#define IDLE_STACK ((3+3+4) * sizeof(char *)) /* 3 intr, 3 temps, 4 db */
#if (CHIP == INTEL) /* 3 intr, 3 temps, 4 db */
#define IDLE_S ((3+3+4) * sizeof(char *))
#else
#define IDLE_STACK SMALL_STACK
#define IDLE_S SMALL_STACK
#endif
#define HARDWARE_STACK NO_STACK /* dummy task, uses kernel stack */
#define SYS_STACK SMALL_STACK
#define CLOCK_STACK SMALL_STACK
#define HARDWARE_S NO_STACK /* dummy task, uses kernel stack */
#define SYSTEM_S SMALL_STACK
#define CLOCK_S SMALL_STACK
/* Stack space for all the task stacks. Declared as (char *) to align it. */
#define TOT_STACK_SPACE (IDLE_STACK+HARDWARE_STACK+CLOCK_STACK+SYS_STACK)
#define TOT_STACK_SPACE (IDLE_S+HARDWARE_S+CLOCK_S+SYSTEM_S)
PUBLIC char *t_stack[TOT_STACK_SPACE / sizeof(char *)];
@ -60,35 +60,43 @@ PUBLIC char *t_stack[TOT_STACK_SPACE / sizeof(char *)];
* mask, and a name for the process table. For kernel processes, the startup
* routine and stack size is also provided.
*/
#define IDLE_F (PREEMPTIBLE | BILLABLE)
#define USER_F (PREEMPTIBLE | SCHED_Q_HEAD)
#define SYS_F (PREEMPTIBLE)
#define IDLE_T 32 /* ticks */
#define USER_T 8 /* ticks */
#define SYS_T 16 /* ticks */
PUBLIC struct system_image image[] = {
{ IDLE, idle_task, 0, IDLE_Q, IDLE_STACK, EMPTY_CALL_MASK, DENY_ALL_MASK, "IDLE" },
{ CLOCK, clock_task, 0, TASK_Q, CLOCK_STACK, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "CLOCK" },
{ SYSTASK, sys_task, 0, TASK_Q, SYS_STACK, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "SYS" },
{ HARDWARE, 0, 0, TASK_Q, HARDWARE_STACK, EMPTY_CALL_MASK, ALLOW_ALL_MASK,"HARDW." },
{ PM_PROC_NR, 0, 0, 3, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "PM" },
{ FS_PROC_NR, 0, 0, 3, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "FS" },
{ IS_PROC_NR, 0, 0, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "IS" },
{ TTY, 0, 0, 1, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "TTY" },
{ MEMORY, 0, 0, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "MEMORY" },
{ IDLE, idle_task, IDLE_F, IDLE_T, IDLE_Q, IDLE_S, EMPTY_CALL_MASK, DENY_ALL_MASK, "IDLE" },
{ CLOCK, clock_task, 0, SYS_T, TASK_Q, CLOCK_S, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "CLOCK" },
{ SYSTASK, sys_task, 0, SYS_T, TASK_Q, SYSTEM_S, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "SYS" },
{ HARDWARE, 0, 0, SYS_T, TASK_Q, HARDWARE_S, EMPTY_CALL_MASK, ALLOW_ALL_MASK,"HARDW." },
{ PM_PROC_NR, 0, 0, SYS_T, 3, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "PM" },
{ FS_PROC_NR, 0, 0, SYS_T, 3, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "FS" },
{ IS_PROC_NR, 0, SYS_F, SYS_T, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "IS" },
{ TTY, 0, SYS_F, SYS_T, 1, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "TTY" },
{ MEMORY, 0, SYS_F, SYS_T, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "MEMORY" },
#if ENABLE_AT_WINI
{ AT_WINI, 0, 0, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "AT_WINI" },
{ AT_WINI, 0, SYS_F, SYS_T, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "AT_WINI" },
#endif
#if ENABLE_FLOPPY
{ FLOPPY, 0, 0, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "FLOPPY" },
{ FLOPPY, 0, SYS_F, SYS_T, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "FLOPPY" },
#endif
#if ENABLE_PRINTER
{ PRINTER, 0, 0, 3, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "PRINTER" },
{ PRINTER, 0, SYS_F, SYS_T, 3, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "PRINTER" },
#endif
#if ENABLE_RTL8139
{ USR8139, 0, 0, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "RTL8139" },
{ USR8139, 0, SYS_F, SYS_T, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "RTL8139" },
#endif
#if ENABLE_FXP
{ FXP, 0, 0, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "FXP" },
{ FXP, 0, SYS_F, SYS_T, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "FXP" },
#endif
#if ENABLE_DPETH
{ DPETH, 0, 0, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "DPETH" },
{ DPETH, 0, SYS_F, SYS_T, 2, 0, SYSTEM_CALL_MASK, ALLOW_ALL_MASK, "DPETH" },
#endif
{ INIT_PROC_NR, 0, 0, USER_Q, 0, USER_CALL_MASK, USER_PROC_SENDMASK, "INIT" },
{ INIT_PROC_NR, 0, USER_F, USER_T, USER_Q, 0, USER_CALL_MASK, USER_PROC_SENDMASK, "INIT" },
};
/* Verify the size of the system image table at compile time. If the number


@ -19,7 +19,8 @@ typedef unsigned long send_mask_t; /* bit mask for sender */
struct system_image {
proc_nr_t proc_nr; /* process number to use */
task_t *initial_pc; /* start function for tasks */
int type; /* type of process */
int flags; /* process flags */
char quantum; /* quantum (tick count) */
int priority; /* scheduling priority */
int stksize; /* stack size for tasks */
char call_mask; /* allowed system calls */