/* This file contains essentially all of the process and message handling.
 * Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
 * There is one entry point from the outside:
 *
 *   sys_call:	a system call, i.e., the kernel is trapped with an INT
 *
 * Changes:
 *   Aug 19, 2005	rewrote scheduling code  (Jorrit N. Herder)
 *   Jul 25, 2005	rewrote system call handling  (Jorrit N. Herder)
 *   May 26, 2005	rewrote message passing functions  (Jorrit N. Herder)
 *   May 24, 2005	new notification system call  (Jorrit N. Herder)
 *   Oct 28, 2004	nonblocking send and receive calls  (Jorrit N. Herder)
 *
 * The code here is critical to make everything work and is important for the
 * overall performance of the system. A large fraction of the code deals with
 * list manipulation. To make this both easy to understand and fast to execute
 * pointer pointers are used throughout the code. Pointer pointers prevent
 * exceptions for the head or tail of a linked list.
 *
 *	node_t *queue, *new_node;	// assume these as global variables
 *	node_t **xpp = &queue;		// get pointer pointer to head of queue
 *	while (*xpp != NULL)		// find last pointer of the linked list
 *		xpp = &(*xpp)->next;	// get pointer to next pointer
 *	*xpp = new_node;		// now replace the end (the NULL pointer)
 *	new_node->next = NULL;		// and mark the new end of the list
 *
 * For example, when adding a new node to the end of the list, one normally
 * makes an exception for an empty list and looks up the end of the list for
 * nonempty lists. As shown above, this is not required with pointer pointers.
 */
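
/* A minimal companion sketch of the complementary operation: removing a node
 * with the same pointer-pointer technique, again without special-casing the
 * head. It assumes the node_t layout from the example above.
 *
 *	typedef struct node { struct node *next; } node_t;
 *
 *	void remove_node(node_t **queue, node_t *victim)
 *	{
 *		node_t **xpp = queue;		// pointer to head pointer
 *		while (*xpp != NULL && *xpp != victim)
 *			xpp = &(*xpp)->next;	// walk the next pointers
 *		if (*xpp != NULL)
 *			*xpp = victim->next;	// unlink, head or interior
 *	}
 */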

#include <minix/com.h>
#include <minix/ipcconst.h>
#include <stddef.h>
#include <signal.h>
#include <assert.h>

#include "kernel/kernel.h"
#include "vm.h"
#include "clock.h"
#include "spinlock.h"
#include "arch_proto.h"

#include <minix/syslib.h>

/* Scheduling and message passing functions */
static void idle(void);
/**
 * Made public for use in clock.c (for user-space scheduling)
static int mini_send(struct proc *caller_ptr, endpoint_t dst_e, message
	*m_ptr, int flags);
*/
static int mini_receive(struct proc *caller_ptr, endpoint_t src,
	message *m_ptr, int flags);
static int mini_senda(struct proc *caller_ptr, asynmsg_t *table, size_t
	size);
static int deadlock(int function, register struct proc *caller,
	endpoint_t src_dst_e);
static int try_async(struct proc *caller_ptr);
static int try_one(struct proc *src_ptr, struct proc *dst_ptr);
static struct proc * pick_proc(void);
static void enqueue_head(struct proc *rp);

/* all idles share the same idle_priv structure */
static struct priv idle_priv;

static void set_idle_name(char * name, int n)
{
	int i, c;
	int p_z = 0;

	if (n > 999)
		n = 999;

	name[0] = 'i';
	name[1] = 'd';
	name[2] = 'l';
	name[3] = 'e';

	for (i = 4, c = 100; c > 0; c /= 10) {
		int digit;

		digit = n / c;
		n -= digit * c;

		if (p_z || digit != 0 || c == 1) {
			p_z = 1;
			name[i++] = '0' + digit;
		}
	}

	name[i] = '\0';
}
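
/* A usage sketch derived from the code above: leading zeros are suppressed
 * and n is clamped to 999, so the buffer needs at least 8 bytes
 * ("idle" + up to 3 digits + NUL):
 *
 *	char name[8];
 *	set_idle_name(name, 0);		// yields "idle0"
 *	set_idle_name(name, 42);	// yields "idle42"
 *	set_idle_name(name, 1234);	// yields "idle999" (clamped)
 */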

#define PICK_ANY	1
#define PICK_HIGHERONLY	2

#define BuildNotifyMessage(m_ptr, src, dst_ptr) \
	(m_ptr)->m_type = NOTIFY_MESSAGE;				\
	(m_ptr)->NOTIFY_TIMESTAMP = get_monotonic();			\
	switch (src) {							\
	case HARDWARE:							\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_int_pending;	\
		priv(dst_ptr)->s_int_pending = 0;			\
		break;							\
	case SYSTEM:							\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_sig_pending;	\
		priv(dst_ptr)->s_sig_pending = 0;			\
		break;							\
	}
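
/* A hedged usage sketch: the notification delivery code builds the message
 * in a scratch buffer right before copying it out, roughly as below, with
 * 'src' a source process number and 'dst_ptr' the destination's proc slot.
 * For HARDWARE and SYSTEM sources the macro also clears the pending bitmap,
 * so those notifications are consumed exactly once; for other sources
 * NOTIFY_ARG is left untouched.
 *
 *	message m;
 *	BuildNotifyMessage(&m, src, dst_ptr);
 */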

void proc_init(void)
{
	struct proc * rp;
	struct priv *sp;
	int i;

	/* Clear the process table. Announce each slot as empty and set up
	 * mappings for the proc_addr() and proc_nr() macros. Do the same for
	 * the table with privilege structures for the system processes.
	 */
	for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
		rp->p_rts_flags = RTS_SLOT_FREE;/* initialize free slot */
		rp->p_magic = PMAGIC;
		rp->p_nr = i;			/* proc number from ptr */
		rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
		rp->p_scheduler = NULL;		/* no user space scheduler */
		rp->p_priority = 0;		/* no priority */
		rp->p_quantum_size_ms = 0;	/* no quantum size */

		/* arch-specific initialization */
		arch_proc_reset(rp);
	}
	for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
		sp->s_proc_nr = NONE;		/* initialize as free */
		sp->s_id = (sys_id_t) i;	/* priv structure index */
		ppriv_addr[i] = sp;		/* priv ptr from number */
		sp->s_sig_mgr = NONE;		/* clear signal managers */
		sp->s_bak_sig_mgr = NONE;
	}

	idle_priv.s_flags = IDL_F;
	/* initialize IDLE structures for every CPU */
	for (i = 0; i < CONFIG_MAX_CPUS; i++) {
		struct proc * ip = get_cpu_var_ptr(i, idle_proc);
		ip->p_endpoint = IDLE;
		ip->p_priv = &idle_priv;
		/* must not let idle ever get scheduled */
		ip->p_rts_flags |= RTS_PROC_STOP;
		set_idle_name(ip->p_name, i);
	}
}
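
/* An illustrative sketch of the numbering proc_init() establishes: slot
 * numbers run from -NR_TASKS (kernel tasks) through NR_PROCS - 1, and the
 * proc_addr() and proc_nr() macros are inverses over that range:
 *
 *	struct proc *rp = proc_addr(-NR_TASKS);	// first kernel task slot
 *	assert(proc_nr(rp) == -NR_TASKS);
 *	assert(rp->p_endpoint == _ENDPOINT(0, -NR_TASKS));
 *
 * Endpoints carry a generation number on top of the slot number (0 at boot),
 * so a reused slot yields an endpoint different from any stale reference.
 */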

static void switch_address_space_idle(void)
{
#ifdef CONFIG_SMP
	/*
	 * Currently we bet that VM is always alive and its pages available, so
	 * when the CPU wakes up the kernel is mapped and no surprises happen.
	 * This is only a problem if more than one CPU is available.
	 */
	switch_address_space(proc_addr(VM_PROC_NR));
#endif
}

/*===========================================================================*
 *				idle					     *
 *===========================================================================*/
static void idle(void)
{
	struct proc * p;

	/* This function is called whenever there is no work to do.
	 * Halt the CPU, and measure how many timestamp counter ticks are
	 * spent not doing anything. This allows test setups to measure
	 * the CPU utilization of certain workloads with high precision.
	 */

	p = get_cpulocal_var(proc_ptr) = get_cpulocal_var_ptr(idle_proc);
	if (priv(p)->s_flags & BILLABLE)
		get_cpulocal_var(bill_ptr) = p;

	switch_address_space_idle();

#ifdef CONFIG_SMP
	get_cpulocal_var(cpu_is_idle) = 1;
	/* we don't need to keep time on APs as it is handled on the BSP */
	if (cpuid != bsp_cpu_id)
		stop_local_timer();
	else
#endif
	{
		/*
		 * If the timer has expired while in kernel we must
		 * rearm it before we go to sleep.
		 */
		restart_local_timer();
	}

	/* start accounting for the idle time */
	context_stop(proc_addr(KERNEL));
#if !SPROFILE
	halt_cpu();
#else
	if (!sprofiling)
		halt_cpu();
	else {
		volatile int * v;

		v = get_cpulocal_var_ptr(idle_interrupted);
		interrupts_enable();
		while (!*v)
			arch_pause();
		interrupts_disable();
		*v = 0;
	}
#endif
	/*
	 * End of accounting for the idle task does not happen here; the kernel
	 * is handling stuff for quite a while before it gets back here!
	 */
}

/*===========================================================================*
 *				switch_to_user				     *
 *===========================================================================*/
void switch_to_user(void)
{
	/* This function is called an instant before proc_ptr is
	 * to be scheduled again.
	 */
	struct proc * p;
#ifdef CONFIG_SMP
	int tlb_must_refresh = 0;
#endif

	p = get_cpulocal_var(proc_ptr);
	/*
	 * If the current process is still runnable, check the misc flags and
	 * let it run unless it becomes not runnable in the meantime.
	 */
	if (proc_is_runnable(p))
		goto check_misc_flags;
	/*
	 * If a process becomes not runnable while handling the misc flags, we
	 * need to pick a new one here and start from scratch. Also if the
	 * current process wasn't runnable, we pick a new one here.
	 */
not_runnable_pick_new:
	if (proc_is_preempted(p)) {
		p->p_rts_flags &= ~RTS_PREEMPTED;
		if (proc_is_runnable(p)) {
			if (p->p_cpu_time_left)
				enqueue_head(p);
			else
				enqueue(p);
		}
	}

	/*
	 * If we have no process to run, set IDLE as the current process for
	 * time accounting and put the CPU in an idle state. After the next
	 * timer interrupt the execution resumes here and we can pick another
	 * process. If there is still nothing runnable we "schedule" IDLE again.
	 */
	while (!(p = pick_proc())) {
		idle();
	}

	/* update the global variable */
	get_cpulocal_var(proc_ptr) = p;

#ifdef CONFIG_SMP
	if (p->p_misc_flags & MF_FLUSH_TLB && get_cpulocal_var(ptproc) == p)
		tlb_must_refresh = 1;
#endif
	switch_address_space(p);

check_misc_flags:

	assert(p);
	assert(proc_is_runnable(p));
	while (p->p_misc_flags &
		(MF_KCALL_RESUME | MF_DELIVERMSG |
		 MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {

		assert(proc_is_runnable(p));
		if (p->p_misc_flags & MF_KCALL_RESUME) {
			kernel_call_resume(p);
		}
|
2010-09-15 16:09:46 +02:00
|
|
|
else if (p->p_misc_flags & MF_DELIVERMSG) {
|
Merge of David's ptrace branch. Summary:
o Support for ptrace T_ATTACH/T_DETACH and T_SYSCALL
o PM signal handling logic should now work properly, even with debuggers
being present
o Asynchronous PM/VFS protocol, full IPC support for senda(), and
AMF_NOREPLY senda() flag
DETAILS
Process stop and delay call handling of PM:
o Added sys_runctl() kernel call with sys_stop() and sys_resume()
aliases, for PM to stop and resume a process
o Added exception for sending/syscall-traced processes to sys_runctl(),
and matching SIGKREADY pseudo-signal to PM
o Fixed PM signal logic to deal with requests from a process after
stopping it (so-called "delay calls"), using the SIGKREADY facility
o Fixed various PM panics due to race conditions with delay calls versus
VFS calls
o Removed special PRIO_STOP priority value
o Added SYS_LOCK RTS kernel flag, to stop an individual process from
running while modifying its process structure
Signal and debugger handling in PM:
o Fixed debugger signals being dropped if a second signal arrives when
the debugger has not retrieved the first one
o Fixed debugger signals being sent to the debugger more than once
o Fixed debugger signals unpausing process in VFS; removed PM_UNPAUSE_TR
protocol message
o Detached debugger signals from general signal logic and from being
blocked on VFS calls, meaning that even VFS can now be traced
o Fixed debugger being unable to receive more than one pending signal in
one process stop
o Fixed signal delivery being delayed needlessly when multiple signals
are pending
o Fixed wait test for tracer, which was returning for children that were
not waited for
o Removed second parallel pending call from PM to VFS for any process
o Fixed process becoming runnable between exec() and debugger trap
o Added support for notifying the debugger before the parent when a
debugged child exits
o Fixed debugger death causing child to remain stopped forever
o Fixed consistently incorrect use of _NSIG
Extensions to ptrace():
o Added T_ATTACH and T_DETACH ptrace request, to attach and detach a
debugger to and from a process
o Added T_SYSCALL ptrace request, to trace system calls
o Added T_SETOPT ptrace request, to set trace options
o Added TO_TRACEFORK trace option, to attach automatically to children
of a traced process
o Added TO_ALTEXEC trace option, to send SIGSTOP instead of SIGTRAP upon
a successful exec() of the tracee
o Extended T_GETUSER ptrace support to allow retrieving a process's priv
structure
o Removed T_STOP ptrace request again, as it does not help implementing
debuggers properly
o Added MINIX3-specific ptrace test (test42)
o Added proper manual page for ptrace(2)
Asynchronous PM/VFS interface:
o Fixed asynchronous messages not being checked when receive() is called
with an endpoint other than ANY
o Added AMF_NOREPLY senda() flag, preventing such messages from
satisfying the receive part of a sendrec()
o Added asynsend3() that takes optional flags; asynsend() is now a
#define passing in 0 as third parameter
o Made PM/VFS protocol asynchronous; reintroduced tell_fs()
o Made PM_BASE request/reply number range unique
o Hacked in a horrible temporary workaround into RS to deal with newly
revealed RS-PM-VFS race condition triangle until VFS is asynchronous
System signal handling:
o Fixed shutdown logic of device drivers; removed old SIGKSTOP signal
o Removed is-superuser check from PM's do_procstat() (aka getsigset())
o Added sigset macros to allow system processes to deal with the full
signal set, rather than just the POSIX subset
Miscellaneous PM fixes:
o Split do_getset into do_get and do_set, merging common code and making
structure clearer
o Fixed setpriority() being able to put to sleep processes using an
invalid parameter, or revive zombie processes
o Made find_proc() global; removed obsolete proc_from_pid()
o Cleanup here and there
Also included:
o Fixed false-positive boot order kernel warning
o Removed last traces of old NOTIFY_FROM code
THINGS OF POSSIBLE INTEREST
o It should now be possible to run PM at any priority, even lower than
user processes
o No assumptions are made about communication speed between PM and VFS,
although communication must be FIFO
o A debugger will now receive incoming debuggee signals at kill time
only; the process may not yet be fully stopped
o A first step has been made towards making the SYSTEM task preemptible
2009-09-30 11:57:22 +02:00
|
|
|
TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
|
2010-09-15 16:09:46 +02:00
|
|
|
p->p_name, p->p_endpoint););
|
|
|
|
delivermsg(p);
|
Merge of David's ptrace branch. Summary:
o Support for ptrace T_ATTACH/T_DETACH and T_SYSCALL
o PM signal handling logic should now work properly, even with debuggers
being present
o Asynchronous PM/VFS protocol, full IPC support for senda(), and
AMF_NOREPLY senda() flag
DETAILS
Process stop and delay call handling of PM:
o Added sys_runctl() kernel call with sys_stop() and sys_resume()
aliases, for PM to stop and resume a process
o Added exception for sending/syscall-traced processes to sys_runctl(),
and matching SIGKREADY pseudo-signal to PM
o Fixed PM signal logic to deal with requests from a process after
stopping it (so-called "delay calls"), using the SIGKREADY facility
o Fixed various PM panics due to race conditions with delay calls versus
VFS calls
o Removed special PRIO_STOP priority value
o Added SYS_LOCK RTS kernel flag, to stop an individual process from
running while modifying its process structure
Signal and debugger handling in PM:
o Fixed debugger signals being dropped if a second signal arrives when
the debugger has not retrieved the first one
o Fixed debugger signals being sent to the debugger more than once
o Fixed debugger signals unpausing process in VFS; removed PM_UNPAUSE_TR
protocol message
o Detached debugger signals from general signal logic and from being
blocked on VFS calls, meaning that even VFS can now be traced
o Fixed debugger being unable to receive more than one pending signal in
one process stop
o Fixed signal delivery being delayed needlessly when multiple signals
are pending
o Fixed wait test for tracer, which was returning for children that were
not waited for
o Removed second parallel pending call from PM to VFS for any process
o Fixed process becoming runnable between exec() and debugger trap
o Added support for notifying the debugger before the parent when a
debugged child exits
o Fixed debugger death causing child to remain stopped forever
o Fixed consistently incorrect use of _NSIG
Extensions to ptrace():
o Added T_ATTACH and T_DETACH ptrace request, to attach and detach a
debugger to and from a process
o Added T_SYSCALL ptrace request, to trace system calls
o Added T_SETOPT ptrace request, to set trace options
o Added TO_TRACEFORK trace option, to attach automatically to children
of a traced process
o Added TO_ALTEXEC trace option, to send SIGSTOP instead of SIGTRAP upon
a successful exec() of the tracee
o Extended T_GETUSER ptrace support to allow retrieving a process's priv
structure
o Removed T_STOP ptrace request again, as it does not help implementing
debuggers properly
o Added MINIX3-specific ptrace test (test42)
o Added proper manual page for ptrace(2)
Asynchronous PM/VFS interface:
o Fixed asynchronous messages not being checked when receive() is called
with an endpoint other than ANY
o Added AMF_NOREPLY senda() flag, preventing such messages from
satisfying the receive part of a sendrec()
o Added asynsend3() that takes optional flags; asynsend() is now a
#define passing in 0 as third parameter
o Made PM/VFS protocol asynchronous; reintroduced tell_fs()
o Made PM_BASE request/reply number range unique
o Hacked in a horrible temporary workaround into RS to deal with newly
revealed RS-PM-VFS race condition triangle until VFS is asynchronous
System signal handling:
o Fixed shutdown logic of device drivers; removed old SIGKSTOP signal
o Removed is-superuser check from PM's do_procstat() (aka getsigset())
o Added sigset macros to allow system processes to deal with the full
signal set, rather than just the POSIX subset
Miscellaneous PM fixes:
o Split do_getset into do_get and do_set, merging common code and making
structure clearer
o Fixed setpriority() being able to put to sleep processes using an
invalid parameter, or revive zombie processes
o Made find_proc() global; removed obsolete proc_from_pid()
o Cleanup here and there
Also included:
o Fixed false-positive boot order kernel warning
o Removed last traces of old NOTIFY_FROM code
THINGS OF POSSIBLE INTEREST
o It should now be possible to run PM at any priority, even lower than
user processes
o No assumptions are made about communication speed between PM and VFS,
although communication must be FIFO
o A debugger will now receive incoming debuggee signals at kill time
only; the process may not yet be fully stopped
o A first step has been made towards making the SYSTEM task preemptible
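To make the new requests concrete, here is a minimal tracer sketch in C. The request and option names (T_ATTACH, T_SETOPT, TO_TRACEFORK, T_SYSCALL, T_DETACH, T_GETUSER) are taken from the summary above; the exact ptrace() prototype, the header locations and the stop-inspection details are assumptions rather than verbatim MINIX code.

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>

    /* Minimal tracer sketch: attach, follow system calls, detach. */
    int trace_pid(pid_t pid)
    {
    	int status;

    	if (ptrace(T_ATTACH, pid, 0, 0) == -1)	/* attach to a running process */
    		return -1;
    	if (waitpid(pid, &status, 0) == -1)	/* wait for the initial stop */
    		return -1;

    	/* Optionally follow children of the tracee as well. */
    	ptrace(T_SETOPT, pid, 0, TO_TRACEFORK);

    	/* Resume until the next system call enter/leave event. */
    	while (ptrace(T_SYSCALL, pid, 0, 0) == 0) {
    		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
    			break;
    		/* ...inspect the stop here, e.g. via T_GETUSER... */
    	}
    	return ptrace(T_DETACH, pid, 0, 0);	/* let the tracee run freely */
    }

Similarly, a sketch of the asynchronous send path described under "Asynchronous PM/VFS interface". asynsend3() and AMF_NOREPLY are named above; the message type name and the header locations are assumptions.

    #include <string.h>
    #include <minix/com.h>
    #include <minix/ipc.h>

    /* Sketch: fire-and-forget reply towards VFS. AMF_NOREPLY keeps this
     * message from satisfying the receive part of a pending sendrec(). */
    static void reply_vfs_async(endpoint_t vfs_ep)
    {
    	message m;

    	memset(&m, 0, sizeof(m));
    	m.m_type = PM_EXIT_REPLY;	/* hypothetical reply code */
    	asynsend3(vfs_ep, &m, AMF_NOREPLY);
    	/* asynsend(vfs_ep, &m) is now a #define for asynsend3(vfs_ep, &m, 0). */
    }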
2009-09-30 11:57:22 +02:00
	}
2010-09-15 16:09:46 +02:00
	else if (p->p_misc_flags & MF_SC_DEFER) {
2009-09-30 11:57:22 +02:00
		/* Perform the system call that we deferred earlier. */
2010-09-15 16:09:46 +02:00
		assert(!(p->p_misc_flags & MF_SC_ACTIVE));
2010-09-15 16:09:46 +02:00
		arch_do_syscall(p);
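One clarifying annotation on the two flags involved here (added for this write-up, not part of the original source):

    /* MF_SC_DEFER marks a system call that was intercepted and postponed,
     * e.g. for a T_SYSCALL trace stop, while MF_SC_ACTIVE marks a system
     * call currently in progress; the assertion above guards that the two
     * are never set at the same time. */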
2009-09-30 11:57:22 +02:00
		/* If the process is stopped for signal delivery, and
		 * not blocked sending a message after the system call,
		 * inform PM.
		 */
2010-09-15 16:09:46 +02:00
		if ((p->p_misc_flags & MF_SIG_DELAY) &&
				!RTS_ISSET(p, RTS_SENDING))
			sig_delay_done(p);
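For context, a sketch of what sig_delay_done() plausibly does, judging from the SIGKREADY description in the merge summary above; the body shown is an assumption, not the actual kernel source.

    /* Assumed sketch: the delay call has completed, so tell PM that the
     * process is now ready to be stopped. */
    static void sig_delay_done(struct proc *rp)
    {
    	rp->p_misc_flags &= ~MF_SIG_DELAY;	/* no longer in a delay call */
    	cause_sig(proc_nr(rp), SIGKREADY);	/* pseudo-signal towards PM */
    }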
2009-09-30 11:57:22 +02:00
	}
2010-09-15 16:09:46 +02:00
	else if (p->p_misc_flags & MF_SC_TRACE) {
2009-09-30 11:57:22 +02:00
		/* Trigger a system call leave event if this was a
		 * system call. We must do this after processing the
		 * other flags above, both for tracing correctness and
		 * to be able to use 'break'.
		 */
2010-09-15 16:09:46 +02:00
		if (!(p->p_misc_flags & MF_SC_ACTIVE))
2009-09-30 11:57:22 +02:00
			break;
2010-09-15 16:09:46 +02:00
		p->p_misc_flags &=
2009-09-30 11:57:22 +02:00
			~(MF_SC_TRACE | MF_SC_ACTIVE);

		/* Signal the "leave system call" event.
		 * Block the process.
		 */
2010-09-15 16:09:46 +02:00
		cause_sig(proc_nr(p), SIGTRAP);
2009-09-30 11:57:22 +02:00
	}
2010-09-15 16:09:46 +02:00
	else if (p->p_misc_flags & MF_SC_ACTIVE) {
2009-09-30 11:57:22 +02:00
		/* If MF_SC_ACTIVE was set, remove it now:
		 * we're leaving the system call.
		 */
2010-09-15 16:09:46 +02:00
		p->p_misc_flags &= ~MF_SC_ACTIVE;
2009-09-30 11:57:22 +02:00
		break;
	}
2010-09-15 16:09:46 +02:00
	/*
	 * the selected process might not be runnable anymore. We have
	 * to check it and schedule another one
	 */
	if (!proc_is_runnable(p))
		goto not_runnable_pick_new;
Primary goal for these changes is:
- no longer have kernel have its own page table that is loaded
on every kernel entry (trap, interrupt, exception). the primary
purpose is to reduce the number of required reloads.
Result:
- kernel can only access memory of process that was running when
kernel was entered
- kernel must be mapped into every process page table, so traps to
kernel keep working
Problem:
- kernel must often access memory of arbitrary processes (e.g. send
arbitrary processes messages); this can't happen directly any more;
usually because that process' page table isn't loaded at all, sometimes
because that memory isn't mapped in at all, sometimes because it isn't
mapped in read-write.
So:
- kernel must be able to map in memory of any process, in its own
address space.
Implementation:
- VM and kernel share a range of memory in which addresses of
all page tables of all processes are available. This has two purposes:
. Kernel has to know what data to copy in order to map in a range
. Kernel has to know where to write the data in order to map it in
That last point is because kernel has to write in the currently loaded
page table.
- Processes and kernel are separated through segments; kernel segments
haven't changed.
- The kernel keeps the process whose page table is currently loaded
in 'ptproc.'
- If it wants to map in a range of memory, it writes the value of the
page directory entry for that range into the page directory entry
in the currently loaded map. There is a slot reserved for such
purposes. The kernel can then access this memory directly.
- In order to do this, its segment has been increased (and the
segments of processes start where it ends).
- In the pagefault handler, detect if the kernel is doing
'trappable' memory access (i.e. a pagefault isn't a fatal
error) and if so,
- set the saved instruction pointer to phys_copy_fault,
breaking out of phys_copy
- set the saved eax register to the address of the page
fault, both for sanity checking and for checking in
which of the two ranges that phys_copy was called
with the fault occurred (see the sketch after this log message)
- Some boot-time processes do not have their own page table,
and are mapped in with the kernel, and separated with
segments. The kernel detects this using HASPT. If such a
process has to be scheduled, any page table will work and
no page table switch is done.
Major changes in kernel are
- When accessing user processes memory, kernel no longer
explicitly checks before it does so if that memory is OK.
It simply makes the mapping (if necessary), tries to do the
operation, and traps the pagefault if that memory isn't present;
if that happens, the copy function returns EFAULT.
So all of the CHECKRANGE_OR_SUSPEND macros are gone.
- Kernel no longer has to copy/read and parse page tables.
- A message copying optimisation: when messages are copied, and
the recipient isn't mapped in, they are copied into a buffer
in the kernel. This is done in QueueMess. The next time
the recipient is scheduled, this message is copied into
its memory. This happens in schedcheck().
This eliminates the mapping/copying step for messages, and makes
it easier to deliver messages. This eliminates soft_notify.
- Kernel no longer creates a page table at all, so the vm_setbuf
and pagetable writing in memory.c is gone.
Minor changes in kernel are
- ipc_stats thrown out, wasn't used
- misc flags all renamed to MF_*
- NOREC_* macros to enter and leave functions that should not
be called recursively; just sanity checks really
- code to fully decode segment selectors and descriptors
to print on exceptions
- lots of vmassert()s added, only executed if DEBUG_VMASSERT is 1
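A C approximation of the "trappable" kernel access described above, assuming a hypothetical in_phys_copy() range check; in reality phys_copy and phys_copy_fault live in assembly and the handler rewrites the saved trap frame.

    /* Sketch of the pagefault handler's cooperation with phys_copy();
     * p_reg.pc and the saved eax (retreg) follow the description above. */
    void pagefault_handler(struct proc *saved, vir_bytes fault_addr)
    {
    	if (in_phys_copy(saved->p_reg.pc)) {
    		/* Not fatal: abort the copy rather than panic; the copy
    		 * function then returns EFAULT to its caller. */
    		saved->p_reg.pc = (reg_t) phys_copy_fault;
    		saved->p_reg.retreg = (reg_t) fault_addr;
    		return;
    	}
    	panic("unexpected kernel page fault");
    }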
2009-09-21 16:31:52 +02:00
	}
2010-05-25 10:06:14 +02:00
	/*
	 * check the quantum left before it runs again. We must do it only here
	 * as we are sure that a possible out-of-quantum message to the
	 * scheduler will not collide with the regular ipc
	 */
2013-08-07 12:17:09 +02:00
	if (!p->p_cpu_time_left)
2010-09-15 16:09:46 +02:00
		proc_no_time(p);
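A hedged sketch of what proc_no_time() does, following the userspace-scheduling description below: kernel-scheduled processes simply get a fresh quantum, while processes with a userspace scheduler are dequeued and their scheduler is notified. Field and helper names other than p_scheduler, p_cpu_time_left and RTS_NO_QUANTUM are assumptions.

    /* Assumed sketch of proc_no_time(). */
    static void proc_no_time(struct proc *p)
    {
    	if (p->p_scheduler == NULL) {
    		/* Scheduled by the kernel itself: renew and keep running. */
    		p->p_cpu_time_left = p->p_quantum_size;	/* assumed field */
    	} else {
    		/* Let the userspace scheduler decide what happens next. */
    		RTS_SET(p, RTS_NO_QUANTUM);	/* dequeue the process */
    		notify_scheduler(p);		/* out-of-quantum message */
    	}
    }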
Userspace scheduling
- contributed by Bjorn Swift
- In this first phase, scheduling is moved from the kernel to the PM
server. The next steps are to a) move scheduling to its own server
and b) include useful information in the "out of quantum" message,
so that the scheduler can make use of this information.
- The kernel process table now keeps record of who is responsible for
scheduling each process (p_scheduler). When this pointer is NULL,
the process will be scheduled by the kernel. If such a process runs
out of quantum, the kernel will simply renew its quantum and requeue
it.
- When PM loads, it will take over scheduling of all running
processes, except system processes, using sys_schedctl().
Essentially, this only results in taking over init. As children
inherit a scheduler from their parent, user space programs forked by
init will inherit PM (for now) as their scheduler.
- Once a process has been assigned a scheduler, and runs out of
quantum, its RTS_NO_QUANTUM flag will be set and the process
dequeued. The kernel will send a message to the scheduler, on the
process' behalf, informing the scheduler that it has run out of
quantum. The scheduler can take whatever action it pleases, based
on its policy, and then reschedule the process using the
sys_schedule() system call (see the sketch after this log message).
- Balancing queues does not work as before. While the old in-kernel
function used to renew the quantum of processes in the highest
priority run queue, the user-space implementation only acts on
processes that have been bumped down to a lower priority queue.
This approach reacts more slowly to changes than the old one, but saves
us sending a sys_schedule message for each process every time we
balance the queues. Currently, when processes are moved up a
priority queue, their quantum is also renewed, but this can be
fiddled with.
- do_nice has been removed from the kernel. PM answers get- and
setpriority calls, updates its own nice variable as well as the
max_run_queue. This will be refactored once scheduling is moved to a
separate server. We will probably have PM update its local nice
value and then send a message to whoever is scheduling the process.
- changes to fix an issue in do_fork() where processes could run out
of quantum while bypassing the code path that handles it correctly.
The future plan is to remove the policy from do_fork() and implement
it in userspace too.
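To make the message flow concrete, a sketch of the scheduler's side (PM, in this phase). The message type name, its fields, the two-argument receive() and the sys_schedule() parameter list are assumptions based on the description above.

    #include <minix/com.h>
    #include <minix/ipc.h>

    /* Assumed sketch: a userspace scheduler handling out-of-quantum events. */
    static void sched_loop(void)
    {
    	message m;

    	while (receive(ANY, &m) == OK) {
    		if (m.m_type == SCHEDULING_NO_QUANTUM) {	/* assumed type */
    			endpoint_t ep = m.m_source;	/* sent on the process' behalf */

    			/* Policy goes here: e.g. bump the process down one
    			 * queue, then put it back with a fresh quantum. */
    			int prio = pick_priority(ep);	/* hypothetical helper */
    			sys_schedule(ep, prio, DEFAULT_QUANTUM);	/* assumed params */
    		}
    	}
    }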
2010-03-29 13:07:20 +02:00
	/*
	 * After handling the misc flags the selected process might not be
	 * runnable anymore. We have to check it and schedule another one
	 */
2010-09-15 16:09:46 +02:00
	if (!proc_is_runnable(p))
2010-03-29 13:07:20 +02:00
		goto not_runnable_pick_new;
2010-09-15 16:11:01 +02:00
	TRACE(VF_SCHEDULING, printf("cpu %d starting %s / %d "
		"pc 0x%08x\n",
		cpuid, p->p_name, p->p_endpoint, p->p_reg.pc););
Primary goal for these changes is:
- no longer have kernel have its own page table that is loaded
on every kernel entry (trap, interrupt, exception). the primary
purpose is to reduce the number of required reloads.
Result:
- kernel can only access memory of process that was running when
kernel was entered
- kernel must be mapped into every process page table, so traps to
kernel keep working
Problem:
- kernel must often access memory of arbitrary processes (e.g. send
arbitrary processes messages); this can't happen directly any more;
usually because that process' page table isn't loaded at all, sometimes
because that memory isn't mapped in at all, sometimes because it isn't
mapped in read-write.
So:
- kernel must be able to map in memory of any process, in its own
address space.
Implementation:
- VM and kernel share a range of memory in which addresses of
all page tables of all processes are available. This has two purposes:
. Kernel has to know what data to copy in order to map in a range
. Kernel has to know where to write the data in order to map it in
That last point is because kernel has to write in the currently loaded
page table.
- Processes and kernel are separated through segments; kernel segments
haven't changed.
- The kernel keeps the process whose page table is currently loaded
in 'ptproc.'
- If it wants to map in a range of memory, it writes the value of the
page directory entry for that range into the page directory entry
in the currently loaded map. There is a slot reserved for such
purposes. The kernel can then access this memory directly.
- In order to do this, its segment has been increased (and the
segments of processes start where it ends).
- In the pagefault handler, detect if the kernel is doing
'trappable' memory access (i.e. a pagefault isn't a fatal
error) and if so,
- set the saved instruction pointer to phys_copy_fault,
breaking out of phys_copy
- set the saved eax register to the address of the page
fault, both for sanity checking and for checking in
which of the two ranges passed to phys_copy the
fault occurred
- Some boot-time processes do not have their own page table,
and are mapped in with the kernel, and separated with
segments. The kernel detects this using HASPT. If such a
process has to be scheduled, any page table will work and
no page table switch is done.
Major changes in kernel are
- When accessing user processes' memory, the kernel no longer
explicitly checks beforehand whether that memory is OK.
It simply makes the mapping (if necessary), tries to do the
operation, and traps the pagefault if that memory isn't present;
if that happens, the copy function returns EFAULT (sketched below).
So all of the CHECKRANGE_OR_SUSPEND macros are gone.
- Kernel no longer has to copy/read and parse page tables.
- A message copying optimisation: when messages are copied, and
the recipient isn't mapped in, they are copied into a buffer
in the kernel. This is done in QueueMess. The next time
the recipient is scheduled, this message is copied into
its memory. This happens in schedcheck().
This eliminates the mapping/copying step for messages, and makes
it easier to deliver messages. This eliminates soft_notify.
- Kernel no longer creates a page table at all, so the vm_setbuf
and pagetable writing in memory.c is gone.
Minor changes in kernel are
- ipc_stats thrown out, wasn't used
- misc flags all renamed to MF_*
- NOREC_* macros to enter and leave functions that should not
be called recursively; just sanity checks really
- code to fully decode segment selectors and descriptors
to print on exceptions
- lots of vmassert()s added, only executed if DEBUG_VMASSERT is 1
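The trappable copy described above can be sketched as follows. The
return convention (phys_copy() yielding 0 on success and the faulting
address otherwise) follows the description above; take the glue code
as an assumption, not the verbatim kernel source:

    /* Sketch: a copy that survives pagefaults. On a fault, the handler
     * redirects execution to phys_copy_fault, so phys_copy() returns
     * the faulting address instead of completing (assumed convention).
     */
    static int sketch_trappable_copy(phys_bytes src, phys_bytes dst,
        phys_bytes bytes)
    {
        phys_bytes fault_addr;

        fault_addr = phys_copy(src, dst, bytes);
        if (fault_addr != 0)
            return EFAULT;  /* memory absent or not writable */
        return OK;
    }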
2009-09-21 16:31:52 +02:00
|
|
|
#if DEBUG_TRACE
|
2010-09-15 16:09:46 +02:00
|
|
|
p->p_schedules++;
|
2009-09-21 16:31:52 +02:00
|
|
|
#endif
|
2009-11-06 10:08:26 +01:00
|
|
|
|
2010-09-15 16:09:46 +02:00
|
|
|
p = arch_finish_switch_to_user();
|
2013-08-07 12:17:09 +02:00
|
|
|
assert(p->p_cpu_time_left);
|
2010-03-29 13:07:20 +02:00
|
|
|
|
2010-05-18 15:00:39 +02:00
|
|
|
context_stop(proc_addr(KERNEL));
|
2009-11-06 10:08:26 +01:00
|
|
|
|
2010-06-07 09:43:17 +02:00
|
|
|
/* If the process isn't the owner of FPU, enable the FPU exception */
|
2013-08-07 12:17:09 +02:00
|
|
|
if (get_cpulocal_var(fpu_owner) != p)
|
2010-06-07 09:43:17 +02:00
|
|
|
enable_fpu_exception();
|
|
|
|
else
|
|
|
|
disable_fpu_exception();
|
2010-07-20 19:13:44 +02:00
|
|
|
|
|
|
|
/* If MF_CONTEXT_SET is set, don't clobber process state within
|
|
|
|
* the kernel. The next kernel entry is OK again though.
|
|
|
|
*/
|
2010-09-15 16:09:46 +02:00
|
|
|
p->p_misc_flags &= ~MF_CONTEXT_SET;
|
2010-07-20 19:13:44 +02:00
|
|
|
|
2012-10-08 03:38:03 +02:00
|
|
|
#if defined(__i386__)
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance, as processes won't use 0xF0000000 and above (sketched below).
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses are prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
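A hedged illustration of the resulting address split: with all segments
based at 0, a bare virtual address is enough to tell kernel and user
apart. The constant mirrors the 0xF0000000 boundary mentioned above;
the helper name is invented for illustration:

    #define SKETCH_KERN_VBASE 0xF0000000UL  /* boundary from the text */

    static int sketch_is_kernel_vaddr(unsigned long vaddr)
    {
        /* User processes live strictly below the kernel mapping. */
        return vaddr >= SKETCH_KERN_VBASE;
    }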
2012-05-07 16:03:35 +02:00
|
|
|
assert(p->p_seg.p_cr3 != 0);
|
2012-10-08 03:38:03 +02:00
|
|
|
#elif defined(__arm__)
|
|
|
|
assert(p->p_seg.p_ttbr != 0);
|
|
|
|
#endif
|
2010-09-22 10:01:36 +02:00
|
|
|
#ifdef CONFIG_SMP
|
2010-10-25 18:21:23 +02:00
|
|
|
if (p->p_misc_flags & MF_FLUSH_TLB) {
|
|
|
|
if (tlb_must_refresh)
|
|
|
|
refresh_tlb();
|
|
|
|
p->p_misc_flags &= ~MF_FLUSH_TLB;
|
|
|
|
}
|
2010-09-22 10:01:36 +02:00
|
|
|
#endif
|
2010-10-21 19:07:01 +02:00
|
|
|
|
|
|
|
restart_local_timer();
|
|
|
|
|
2010-05-18 15:00:39 +02:00
|
|
|
/*
|
|
|
|
* restore_user_context() carries out the actual mode switch from kernel
|
|
|
|
* to userspace. This function does not return
|
|
|
|
*/
|
2010-09-15 16:09:46 +02:00
|
|
|
restore_user_context(p);
|
2010-05-18 15:00:39 +02:00
|
|
|
NOT_REACHABLE;
|
2009-09-21 16:31:52 +02:00
|
|
|
}
|
2005-04-21 16:53:53 +02:00
|
|
|
|
2010-04-06 13:24:26 +02:00
|
|
|
/*
|
2010-04-06 13:29:31 +02:00
|
|
|
* handler for all synchronous IPC calls
|
2005-04-21 16:53:53 +02:00
|
|
|
*/
|
2012-03-25 20:25:53 +02:00
|
|
|
static int do_sync_ipc(struct proc * caller_ptr, /* who made the call */
|
2010-04-06 13:24:26 +02:00
|
|
|
int call_nr, /* system call number and flags */
|
|
|
|
endpoint_t src_dst_e, /* src or dst of the call */
|
|
|
|
message *m_ptr)			/* user's pointer to a message */
|
|
|
|
{
|
2005-04-21 16:53:53 +02:00
|
|
|
int result; /* the system call's result */
|
2007-04-23 16:24:30 +02:00
|
|
|
int src_dst_p; /* Process slot number */
|
2010-07-16 17:36:29 +02:00
|
|
|
char *callname;
|
Merge of David's ptrace branch. Summary:
o Support for ptrace T_ATTACH/T_DETACH and T_SYSCALL
o PM signal handling logic should now work properly, even with debuggers
being present
o Asynchronous PM/VFS protocol, full IPC support for senda(), and
AMF_NOREPLY senda() flag
DETAILS
Process stop and delay call handling of PM:
o Added sys_runctl() kernel call with sys_stop() and sys_resume()
aliases, for PM to stop and resume a process
o Added exception for sending/syscall-traced processes to sys_runctl(),
and matching SIGKREADY pseudo-signal to PM
o Fixed PM signal logic to deal with requests from a process after
stopping it (so-called "delay calls"), using the SIGKREADY facility
o Fixed various PM panics due to race conditions with delay calls versus
VFS calls
o Removed special PRIO_STOP priority value
o Added SYS_LOCK RTS kernel flag, to stop an individual process from
running while modifying its process structure
Signal and debugger handling in PM:
o Fixed debugger signals being dropped if a second signal arrives when
the debugger has not retrieved the first one
o Fixed debugger signals being sent to the debugger more than once
o Fixed debugger signals unpausing process in VFS; removed PM_UNPAUSE_TR
protocol message
o Detached debugger signals from general signal logic and from being
blocked on VFS calls, meaning that even VFS can now be traced
o Fixed debugger being unable to receive more than one pending signal in
one process stop
o Fixed signal delivery being delayed needlessly when multiple signals
are pending
o Fixed wait test for tracer, which was returning for children that were
not waited for
o Removed second parallel pending call from PM to VFS for any process
o Fixed process becoming runnable between exec() and debugger trap
o Added support for notifying the debugger before the parent when a
debugged child exits
o Fixed debugger death causing child to remain stopped forever
o Fixed consistently incorrect use of _NSIG
Extensions to ptrace():
o Added T_ATTACH and T_DETACH ptrace request, to attach and detach a
debugger to and from a process
o Added T_SYSCALL ptrace request, to trace system calls (sketched below)
o Added T_SETOPT ptrace request, to set trace options
o Added TO_TRACEFORK trace option, to attach automatically to children
of a traced process
o Added TO_ALTEXEC trace option, to send SIGSTOP instead of SIGTRAP upon
a successful exec() of the tracee
o Extended T_GETUSER ptrace support to allow retrieving a process's priv
structure
o Removed T_STOP ptrace request again, as it does not help implementing
debuggers properly
o Added MINIX3-specific ptrace test (test42)
o Added proper manual page for ptrace(2)
Asynchronous PM/VFS interface:
o Fixed asynchronous messages not being checked when receive() is called
with an endpoint other than ANY
o Added AMF_NOREPLY senda() flag, preventing such messages from
satisfying the receive part of a sendrec()
o Added asynsend3() that takes optional flags; asynsend() is now a
#define passing in 0 as third parameter
o Made PM/VFS protocol asynchronous; reintroduced tell_fs()
o Made PM_BASE request/reply number range unique
o Hacked in a horrible temporary workaround into RS to deal with newly
revealed RS-PM-VFS race condition triangle until VFS is asynchronous
System signal handling:
o Fixed shutdown logic of device drivers; removed old SIGKSTOP signal
o Removed is-superuser check from PM's do_procstat() (aka getsigset())
o Added sigset macros to allow system processes to deal with the full
signal set, rather than just the POSIX subset
Miscellaneous PM fixes:
o Split do_getset into do_get and do_set, merging common code and making
structure clearer
o Fixed setpriority() being able to put to sleep processes using an
invalid parameter, or revive zombie processes
o Made find_proc() global; removed obsolete proc_from_pid()
o Cleanup here and there
Also included:
o Fixed false-positive boot order kernel warning
o Removed last traces of old NOTIFY_FROM code
THINGS OF POSSIBLE INTEREST
o It should now be possible to run PM at any priority, even lower than
user processes
o No assumptions are made about communication speed between PM and VFS,
although communication must be FIFO
o A debugger will now receive incoming debuggee signals at kill time
only; the process may not yet be fully stopped
o A first step has been made towards making the SYSTEM task preemptible
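A minimal tracer sketch against the ptrace(2) requests listed above
(T_ATTACH, T_SYSCALL, T_DETACH, T_GETUSER); the stop/continue protocol
details and all error handling are elided, so treat this as
illustrative rather than a complete debugger:

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>

    /* Attach to pid and stop it at every system call until it exits. */
    static void sketch_trace_syscalls(pid_t pid)
    {
        int status;

        ptrace(T_ATTACH, pid, 0, 0);       /* tracee gets SIGSTOP */
        waitpid(pid, &status, 0);

        for (;;) {
            ptrace(T_SYSCALL, pid, 0, 0);  /* run to next syscall stop */
            waitpid(pid, &status, 0);
            if (WIFEXITED(status))
                break;
            /* inspect the tracee here, e.g. with T_GETUSER */
        }
        ptrace(T_DETACH, pid, 0, 0);
    }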
2009-09-30 11:57:22 +02:00
|
|
|
|
2010-04-06 13:24:26 +02:00
|
|
|
/* Check destination. RECEIVE is the only call that accepts ANY (in addition
|
|
|
|
* to a real endpoint). The other calls (SEND, SENDREC, and NOTIFY) require an
|
|
|
|
* endpoint that corresponds to a process. In addition, it is necessary to check
|
|
|
|
* whether a process is allowed to send to a given destination.
|
2007-04-23 16:24:30 +02:00
|
|
|
*/
|
2010-04-06 13:24:26 +02:00
|
|
|
assert(call_nr != SENDA);
|
|
|
|
|
2010-07-16 17:36:29 +02:00
|
|
|
/* Only allow non-negative call_nr values less than 32 */
|
|
|
|
if (call_nr < 0 || call_nr > IPCNO_HIGHEST || call_nr >= 32
|
|
|
|
|| !(callname = ipc_call_names[call_nr])) {
|
|
|
|
#if DEBUG_ENABLE_IPC_WARNINGS
|
|
|
|
printf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
|
2012-03-30 16:53:07 +02:00
|
|
|
call_nr, proc_nr(caller_ptr), src_dst_e);
|
2010-07-16 17:36:29 +02:00
|
|
|
#endif
|
|
|
|
return(ETRAPDENIED); /* trap denied by mask or kernel */
|
|
|
|
}
|
|
|
|
|
2010-04-06 13:24:26 +02:00
|
|
|
if (src_dst_e == ANY)
|
2007-04-23 16:24:30 +02:00
|
|
|
{
|
|
|
|
if (call_nr != RECEIVE)
|
|
|
|
{
|
2009-09-21 16:31:52 +02:00
|
|
|
#if 0
|
2010-07-16 17:36:29 +02:00
|
|
|
printf("sys_call: %s by %d with bad endpoint %d\n",
|
|
|
|
callname,
|
|
|
|
proc_nr(caller_ptr), src_dst_e);
|
'proc number' is a process slot; 'endpoints' are generation-aware process
instance numbers, encoded and decoded using macros in <minix/endpoint.h>
(see the sketch below).
proc number -> endpoint migration
. proc_nr in the interrupt hook is now an endpoint, proc_nr_e.
. m_source for messages and notifies is now an endpoint, instead of
proc number.
. isokendpt() converts an endpoint to a process number, returns
success (but fails if the process number is out of range, the
process slot is not a living process, or the given endpoint
number does not match the endpoint number in the process slot,
indicating an old process).
. okendpt() is the same as isokendpt(), but panic()s if the conversion
fails. This is mainly used for decoding message.m_source endpoints,
and other endpoint numbers in kernel data structures, which should
always be correct.
. if DEBUG_ENABLE_IPC_WARNINGS is enabled, isokendpt() and okendpt()
get passed the __FILE__ and __LINE__ of the calling lines, and
print messages about what is wrong with the endpoint number
(out of range proc, empty proc, or inconsistent endpoint number),
with the caller, making finding where the conversion failed easy
without having to include code for every call to print where things
went wrong. Sometimes this is harmless (wrong arg to a kernel call),
sometimes it's a fatal internal inconsistency (bogus m_source).
. some process table fields have had an _e appended to indicate they
have become endpoints.
. process endpoint is stored in p_endpoint, without generation number.
it turns out the kernel never needs the generation number, except
when fork()ing, so it's decoded then.
. kernel calls all take endpoints as arguments, not proc numbers.
the one exception is sys_fork(), which needs to know in which slot
to put the child.
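A sketch of the encode/decode round trip, using the <minix/endpoint.h>
macro names (_ENDPOINT, _ENDPOINT_P, _ENDPOINT_G); if the exact names
differ in a given tree, the idea is the same:

    #include <minix/endpoint.h>

    /* An endpoint packs a generation and a slot; a stale endpoint has
     * the right slot but the wrong generation, so isokendpt() can
     * reject it.
     */
    static int sketch_endpoint_roundtrip(int generation, int proc_nr)
    {
        endpoint_t e = _ENDPOINT(generation, proc_nr);  /* encode */

        return _ENDPOINT_P(e) == proc_nr &&   /* slot survives */
               _ENDPOINT_G(e) == generation;  /* generation too */
    }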
2006-03-03 11:00:02 +01:00
|
|
|
#endif
|
2007-04-23 16:24:30 +02:00
|
|
|
return EINVAL;
|
|
|
|
}
|
2010-04-06 13:24:26 +02:00
|
|
|
src_dst_p = (int) src_dst_e;
|
2007-04-23 16:24:30 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Require a valid source and/or destination process. */
|
|
|
|
if(!isokendpt(src_dst_e, &src_dst_p)) {
|
2009-09-21 16:31:52 +02:00
|
|
|
#if 0
|
2010-07-16 17:36:29 +02:00
|
|
|
printf("sys_call: %s by %d with bad endpoint %d\n",
|
|
|
|
callname,
|
|
|
|
proc_nr(caller_ptr), src_dst_e);
|
2007-04-23 16:24:30 +02:00
|
|
|
#endif
|
|
|
|
return EDEADSRCDST;
|
|
|
|
}
|
|
|
|
|
2009-07-02 18:25:31 +02:00
|
|
|
/* If the call is to send to a process, i.e., for SEND, SENDNB,
|
2007-04-23 16:24:30 +02:00
|
|
|
* SENDREC or NOTIFY, verify that the caller is allowed to send to
|
|
|
|
* the given destination.
|
|
|
|
*/
|
2009-07-02 18:25:31 +02:00
|
|
|
if (call_nr != RECEIVE)
|
2007-04-23 16:24:30 +02:00
|
|
|
{
|
2009-07-02 18:25:31 +02:00
|
|
|
if (!may_send_to(caller_ptr, src_dst_p)) {
|
2007-04-23 16:24:30 +02:00
|
|
|
#if DEBUG_ENABLE_IPC_WARNINGS
|
2010-03-03 16:45:01 +01:00
|
|
|
printf(
|
2010-07-16 17:36:29 +02:00
|
|
|
"sys_call: ipc mask denied %s from %d to %d\n",
|
|
|
|
callname,
|
|
|
|
caller_ptr->p_endpoint, src_dst_e);
|
2007-04-23 16:24:30 +02:00
|
|
|
#endif
|
|
|
|
return(ECALLDENIED); /* call denied by ipc mask */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-05-31 11:50:51 +02:00
|
|
|
/* Check if the process has privileges for the requested call. Calls to the
|
|
|
|
* kernel may only be SENDREC, because tasks always reply and may not block
|
|
|
|
* if the caller doesn't do receive().
|
2005-04-21 16:53:53 +02:00
|
|
|
*/
|
2007-04-23 16:24:30 +02:00
|
|
|
if (!(priv(caller_ptr)->s_trap_mask & (1 << call_nr))) {
|
2005-10-18 18:13:12 +02:00
|
|
|
#if DEBUG_ENABLE_IPC_WARNINGS
|
2010-07-16 17:36:29 +02:00
|
|
|
printf("sys_call: %s not allowed, caller %d, src_dst %d\n",
|
|
|
|
callname, proc_nr(caller_ptr), src_dst_p);
|
2005-10-18 18:13:12 +02:00
|
|
|
#endif
|
2008-02-22 13:36:46 +01:00
|
|
|
return(ETRAPDENIED); /* trap denied by mask or kernel */
|
2005-08-05 20:57:20 +02:00
|
|
|
}
|
2005-05-26 15:17:57 +02:00
|
|
|
|
2010-04-06 13:24:26 +02:00
|
|
|
if (call_nr != SENDREC && call_nr != RECEIVE && iskerneln(src_dst_p)) {
|
2005-10-18 18:13:12 +02:00
|
|
|
#if DEBUG_ENABLE_IPC_WARNINGS
|
2011-07-18 19:44:17 +02:00
|
|
|
printf("sys_call: trap %s not allowed, caller %d, src_dst %d\n",
|
2010-07-16 17:36:29 +02:00
|
|
|
callname, proc_nr(caller_ptr), src_dst_e);
|
2005-10-18 18:13:12 +02:00
|
|
|
#endif
|
2008-02-22 13:36:46 +01:00
|
|
|
return(ETRAPDENIED); /* trap denied by mask or kernel */
|
2005-07-26 14:48:34 +02:00
|
|
|
}
|
|
|
|
|
2007-04-23 16:24:30 +02:00
|
|
|
switch(call_nr) {
|
2005-07-27 16:08:59 +02:00
|
|
|
case SENDREC:
|
2007-04-23 16:24:30 +02:00
|
|
|
/* A flag is set so that notifications cannot interrupt SENDREC. */
|
2009-09-21 16:31:52 +02:00
|
|
|
caller_ptr->p_misc_flags |= MF_REPLY_PEND;
|
2007-04-23 16:24:30 +02:00
|
|
|
/* fall through */
|
2005-05-27 14:44:14 +02:00
|
|
|
case SEND:
|
2008-11-19 13:26:10 +01:00
|
|
|
result = mini_send(caller_ptr, src_dst_e, m_ptr, 0);
|
2007-04-23 16:24:30 +02:00
|
|
|
if (call_nr == SEND || result != OK)
|
|
|
|
break; /* done, or SEND failed */
|
|
|
|
/* fall through for SENDREC */
|
2005-05-26 15:17:57 +02:00
|
|
|
case RECEIVE:
|
2010-04-26 16:43:59 +02:00
|
|
|
if (call_nr == RECEIVE) {
|
2009-09-21 16:31:52 +02:00
|
|
|
caller_ptr->p_misc_flags &= ~MF_REPLY_PEND;
|
2010-04-26 16:43:59 +02:00
|
|
|
IPC_STATUS_CLEAR(caller_ptr); /* clear IPC status code */
|
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
result = mini_receive(caller_ptr, src_dst_e, m_ptr, 0);
|
2007-04-23 16:24:30 +02:00
|
|
|
break;
|
2005-05-26 15:17:57 +02:00
|
|
|
case NOTIFY:
|
2009-09-21 16:31:52 +02:00
|
|
|
result = mini_notify(caller_ptr, src_dst_e);
|
2007-04-23 16:24:30 +02:00
|
|
|
break;
|
2008-11-19 13:26:10 +01:00
|
|
|
case SENDNB:
|
|
|
|
result = mini_send(caller_ptr, src_dst_e, m_ptr, NON_BLOCKING);
|
|
|
|
break;
|
2005-05-26 15:17:57 +02:00
|
|
|
default:
|
2007-04-23 16:24:30 +02:00
|
|
|
result = EBADCALL; /* illegal system call */
|
2005-04-21 16:53:53 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Now, return the result of the system call to the caller. */
|
|
|
|
return(result);
|
|
|
|
}
|
|
|
|
|
2012-03-25 20:25:53 +02:00
|
|
|
int do_ipc(reg_t r1, reg_t r2, reg_t r3)
|
2010-04-06 13:24:26 +02:00
|
|
|
{
|
2010-09-15 16:09:46 +02:00
|
|
|
struct proc *const caller_ptr = get_cpulocal_var(proc_ptr); /* get pointer to caller */
|
2010-04-06 13:24:26 +02:00
|
|
|
int call_nr = (int) r1;
|
|
|
|
|
|
|
|
assert(!RTS_ISSET(caller_ptr, RTS_SLOT_FREE));
|
|
|
|
|
2011-02-08 14:58:32 +01:00
|
|
|
/* bill kernel time to this process. */
|
|
|
|
kbill_ipc = caller_ptr;
|
|
|
|
|
2010-04-06 13:24:26 +02:00
|
|
|
/* If this process is subject to system call tracing, handle that first. */
|
|
|
|
if (caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) {
|
|
|
|
/* Are we tracing this process, and is it the first sys_call entry? */
|
|
|
|
if ((caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) ==
|
|
|
|
MF_SC_TRACE) {
|
|
|
|
/* We must notify the tracer before processing the actual
|
|
|
|
* system call. If we don't, the tracer could not obtain the
|
|
|
|
* input message. Postpone the entire system call.
|
|
|
|
*/
|
|
|
|
caller_ptr->p_misc_flags &= ~MF_SC_TRACE;
|
2013-01-04 18:14:31 +01:00
|
|
|
assert(!(caller_ptr->p_misc_flags & MF_SC_DEFER));
|
2010-04-06 13:24:26 +02:00
|
|
|
caller_ptr->p_misc_flags |= MF_SC_DEFER;
|
2013-01-04 18:14:31 +01:00
|
|
|
caller_ptr->p_defer.r1 = r1;
|
|
|
|
caller_ptr->p_defer.r2 = r2;
|
|
|
|
caller_ptr->p_defer.r3 = r3;
|
2010-04-06 13:24:26 +02:00
|
|
|
|
|
|
|
/* Signal the "enter system call" event. Block the process. */
|
|
|
|
cause_sig(proc_nr(caller_ptr), SIGTRAP);
|
|
|
|
|
|
|
|
/* Preserve the return register's value. */
|
|
|
|
return caller_ptr->p_reg.retreg;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If the MF_SC_DEFER flag is set, the syscall is now being resumed. */
|
|
|
|
caller_ptr->p_misc_flags &= ~MF_SC_DEFER;
|
|
|
|
|
|
|
|
assert (!(caller_ptr->p_misc_flags & MF_SC_ACTIVE));
|
|
|
|
|
|
|
|
/* Set a flag to allow reliable tracing of leaving the system call. */
|
|
|
|
caller_ptr->p_misc_flags |= MF_SC_ACTIVE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(caller_ptr->p_misc_flags & MF_DELIVERMSG) {
|
|
|
|
panic("sys_call: MF_DELIVERMSG on for %s / %d\n",
|
|
|
|
caller_ptr->p_name, caller_ptr->p_endpoint);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Now check if the call is known and try to perform the request. The only
|
|
|
|
* system calls that exist in MINIX are sending and receiving messages.
|
|
|
|
* - SENDREC: combines SEND and RECEIVE in a single system call
|
|
|
|
* - SEND: sender blocks until its message has been delivered
|
|
|
|
* - RECEIVE: receiver blocks until an acceptable message has arrived
|
|
|
|
* - NOTIFY: asynchronous call; deliver notification or mark pending
|
|
|
|
* - SENDA: list of asynchronous send requests
|
|
|
|
*/
|
|
|
|
switch(call_nr) {
|
|
|
|
case SENDREC:
|
|
|
|
case SEND:
|
|
|
|
case RECEIVE:
|
|
|
|
case NOTIFY:
|
|
|
|
case SENDNB:
|
2010-09-19 17:52:12 +02:00
|
|
|
{
|
|
|
|
/* Process accounting for scheduling */
|
|
|
|
caller_ptr->p_accounting.ipc_sync++;
|
|
|
|
|
2010-04-06 13:24:26 +02:00
|
|
|
return do_sync_ipc(caller_ptr, call_nr, (endpoint_t) r2,
|
|
|
|
(message *) r3);
|
2010-09-19 17:52:12 +02:00
|
|
|
}
|
2010-04-06 13:24:26 +02:00
|
|
|
case SENDA:
|
|
|
|
{
|
2010-09-19 17:52:12 +02:00
|
|
|
/*
|
2010-04-06 13:24:26 +02:00
|
|
|
* Get and check the size of the argument in bytes, since it is a
|
|
|
|
* table
|
|
|
|
*/
|
|
|
|
size_t msg_size = (size_t) r2;
|
|
|
|
|
2010-09-19 17:52:12 +02:00
|
|
|
/* Process accounting for scheduling */
|
|
|
|
caller_ptr->p_accounting.ipc_async++;
|
|
|
|
|
2010-04-06 13:24:26 +02:00
|
|
|
/* Limit size to something reasonable. An arbitrary choice is 16
|
|
|
|
* times the number of process table entries.
|
|
|
|
*/
|
|
|
|
if (msg_size > 16*(NR_TASKS + NR_PROCS))
|
|
|
|
return EDOM;
|
|
|
|
return mini_senda(caller_ptr, (asynmsg_t *) r3, msg_size);
|
|
|
|
}
|
2012-07-18 18:53:20 +02:00
|
|
|
case MINIX_KERNINFO:
|
|
|
|
{
|
|
|
|
/* It might not be initialized yet. */
|
|
|
|
if(!minix_kerninfo_user) {
|
|
|
|
return EBADCALL;
|
|
|
|
}
|
|
|
|
|
|
|
|
arch_set_secondary_ipc_return(caller_ptr, minix_kerninfo_user);
|
|
|
|
return OK;
|
|
|
|
}
|
2010-04-06 13:24:26 +02:00
|
|
|
default:
|
|
|
|
return EBADCALL; /* illegal system call */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-10-12 17:08:23 +02:00
|
|
|
/*===========================================================================*
|
|
|
|
* deadlock *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
static int deadlock(function, cp, src_dst_e)
|
2005-10-12 17:08:23 +02:00
|
|
|
int function; /* trap number */
|
|
|
|
register struct proc *cp; /* pointer to caller */
|
2010-07-28 16:14:06 +02:00
|
|
|
endpoint_t src_dst_e; /* src or dst process */
|
2005-10-12 17:08:23 +02:00
|
|
|
{
|
|
|
|
/* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
|
|
|
|
* a cyclic dependency of blocking send and receive calls. The only cyclic
|
|
|
|
* dependency that is not fatal is if the caller and target directly SEND(REC)
|
|
|
|
* and RECEIVE to each other. If a deadlock is found, the group size is
|
|
|
|
* returned. Otherwise zero is returned.
|
|
|
|
*/
|
|
|
|
register struct proc *xp; /* process pointer */
|
|
|
|
int group_size = 1; /* start with only caller */
|
2008-11-19 13:26:10 +01:00
|
|
|
#if DEBUG_ENABLE_IPC_WARNINGS
|
|
|
|
static struct proc *processes[NR_PROCS + NR_TASKS];
|
|
|
|
processes[0] = cp;
|
|
|
|
#endif
|
2005-10-12 17:08:23 +02:00
|
|
|
|
2010-07-28 16:14:06 +02:00
|
|
|
while (src_dst_e != ANY) { 			/* loop while src_dst_e is a specific process */
|
|
|
|
int src_dst_slot;
|
|
|
|
okendpt(src_dst_e, &src_dst_slot);
|
|
|
|
xp = proc_addr(src_dst_slot); /* follow chain of processes */
|
|
|
|
assert(proc_ptr_ok(xp));
|
|
|
|
assert(!RTS_ISSET(xp, RTS_SLOT_FREE));
|
2008-11-19 13:26:10 +01:00
|
|
|
#if DEBUG_ENABLE_IPC_WARNINGS
|
|
|
|
processes[group_size] = xp;
|
|
|
|
#endif
|
2005-10-12 17:08:23 +02:00
|
|
|
group_size++;				/* extra process in group */
|
|
|
|
|
2006-03-09 14:59:59 +01:00
|
|
|
/* Check whether the last process in the chain has a dependency. If it
|
2005-10-12 17:08:23 +02:00
|
|
|
* has not, the cycle cannot be closed and we are done.
|
|
|
|
*/
|
2010-07-28 16:14:06 +02:00
|
|
|
if((src_dst_e = P_BLOCKEDON(xp)) == NONE)
|
2010-03-03 16:32:26 +01:00
|
|
|
return 0;
|
|
|
|
|
2005-10-12 17:08:23 +02:00
|
|
|
/* Now check if there is a cyclic dependency. For group sizes of two,
|
|
|
|
* a combination of SEND(REC) and RECEIVE is not fatal. Larger groups
|
|
|
|
* or other combinations indicate a deadlock.
|
|
|
|
*/
|
2010-07-28 16:14:06 +02:00
|
|
|
if (src_dst_e == cp->p_endpoint) { /* possible deadlock */
|
2005-10-12 17:08:23 +02:00
|
|
|
if (group_size == 2) { /* caller and src_dst */
|
|
|
|
/* function << 2 aligns the call's SEND bit with RTS_SENDING, turning the call number into flags: the XOR is nonzero iff one side sends while the other receives. */
|
2009-11-10 10:11:13 +01:00
|
|
|
if ((xp->p_rts_flags ^ (function << 2)) & RTS_SENDING) {
|
2005-10-12 17:08:23 +02:00
|
|
|
return(0); /* not a deadlock */
|
|
|
|
}
|
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
#if DEBUG_ENABLE_IPC_WARNINGS
|
|
|
|
{
|
|
|
|
int i;
|
2010-03-03 16:45:01 +01:00
|
|
|
printf("deadlock between these processes:\n");
|
2008-11-19 13:26:10 +01:00
|
|
|
for(i = 0; i < group_size; i++) {
|
2010-03-03 16:45:01 +01:00
|
|
|
printf(" %10s ", processes[i]->p_name);
|
2010-05-03 19:38:54 +02:00
|
|
|
}
|
|
|
|
printf("\n\n");
|
|
|
|
for(i = 0; i < group_size; i++) {
|
|
|
|
print_proc(processes[i]);
|
2008-11-19 13:26:10 +01:00
|
|
|
proc_stacktrace(processes[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2005-10-12 17:08:23 +02:00
|
|
|
return(group_size); /* deadlock found */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return(0); /* not a deadlock */
|
|
|
|
}
|
|
|
|
|
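/* Illustrative sketch, not part of the kernel: how deadlock() follows the
 * chain of blocked processes. This toy version replaces endpoints with
 * direct pointers and drops the SEND/RECEIVE direction test, so 'tproc'
 * and 'cycle_size' are hypothetical names, not kernel symbols.
 *
 *	struct tproc { struct tproc *blocked_on; };	// NULL if runnable
 *
 *	int cycle_size(struct tproc *cp, struct tproc *target)
 *	{
 *	  struct tproc *xp = target;	// first process the caller blocks on
 *	  int group_size = 1;		// start with only the caller
 *	  while (xp != NULL) {		// follow chain of processes
 *	      group_size++;		// extra process in group
 *	      if (xp->blocked_on == cp)	// chain leads back to caller
 *	          return group_size;	// deadlock; report group size
 *	      xp = xp->blocked_on;	// advance to next dependency
 *	  }
 *	  return 0;			// chain ended; no cycle
 *	}
 *
 * The real deadlock() additionally excuses a group of size two in which the
 * two parties block in opposite directions, since a SENDREC partner that is
 * merely sending back a reply is normal operation, not a deadlock.
 */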
2011-04-08 17:03:33 +02:00
|
|
|
/*===========================================================================*
|
|
|
|
* has_pending *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
static int has_pending(sys_map_t *map, int src_p, int asynm)
|
2011-04-08 17:03:33 +02:00
|
|
|
{
|
|
|
|
/* Check to see if a pending message from the desired source is
|
|
|
|
* available.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int src_id;
|
|
|
|
sys_id_t id = NULL_PRIV_ID;
|
2011-10-25 20:32:30 +02:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
struct proc * p;
|
|
|
|
#endif
|
2011-04-08 17:03:33 +02:00
|
|
|
|
|
|
|
/* Either check a specific bit in the mask map, or find the first bit set in
|
|
|
|
* it (if any), depending on whether the receive was called on a specific
|
|
|
|
* source endpoint.
|
|
|
|
*/
|
|
|
|
if (src_p != ANY) {
|
|
|
|
src_id = nr_to_id(src_p);
|
2011-10-25 20:32:30 +02:00
|
|
|
if (get_sys_bit(*map, src_id)) {
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
p = proc_addr(id_to_nr(src_id));
|
|
|
|
if (asynm && RTS_ISSET(p, RTS_VMINHIBIT))
|
|
|
|
p->p_misc_flags |= MF_SENDA_VM_MISS;
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
id = src_id;
|
|
|
|
}
|
2011-04-08 17:03:33 +02:00
|
|
|
} else {
|
|
|
|
/* Find a source with a pending message */
|
|
|
|
for (src_id = 0; src_id < NR_SYS_PROCS; src_id += BITCHUNK_BITS) {
|
|
|
|
if (get_sys_bits(*map, src_id) != 0) {
|
2011-10-25 20:32:30 +02:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
while (src_id < NR_SYS_PROCS) {
|
|
|
|
while (!get_sys_bit(*map, src_id)) {
|
|
|
|
if (src_id == NR_SYS_PROCS)
|
|
|
|
goto quit_search;
|
|
|
|
src_id++;
|
|
|
|
}
|
|
|
|
p = proc_addr(id_to_nr(src_id));
|
|
|
|
/*
|
|
|
|
				 * We must not let the kernel fiddle with pages of a
|
|
|
|
* process which are currently being changed by
|
|
|
|
* VM. It is dangerous! So do not report such a
|
|
|
|
* process as having pending async messages.
|
|
|
|
* Skip it.
|
|
|
|
*/
|
|
|
|
if (asynm && RTS_ISSET(p, RTS_VMINHIBIT)) {
|
|
|
|
p->p_misc_flags |= MF_SENDA_VM_MISS;
|
|
|
|
src_id++;
|
|
|
|
} else
|
|
|
|
goto quit_search;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
while (!get_sys_bit(*map, src_id)) src_id++;
|
|
|
|
goto quit_search;
|
|
|
|
#endif
|
2011-04-08 17:03:33 +02:00
|
|
|
}
|
|
|
|
}
|
2011-10-25 20:32:30 +02:00
|
|
|
|
|
|
|
quit_search:
|
2011-04-08 17:03:33 +02:00
|
|
|
if (src_id < NR_SYS_PROCS) /* Found one */
|
|
|
|
id = src_id;
|
|
|
|
}
|
|
|
|
|
|
|
|
return(id);
|
|
|
|
}
|
|
|
|
|
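/* Illustrative sketch, not part of the kernel: the ANY-source scan in
 * has_pending() works in two levels. get_sys_bits() fetches a whole chunk
 * of the bitmap, so empty chunks cost one comparison; only a nonzero chunk
 * is searched bit by bit. A hypothetical flat-array equivalent:
 *
 *	#define NCHUNKS		4	// assumed map size, for illustration
 *	#define CHUNK_BITS	32	// bits per chunk
 *	unsigned map[NCHUNKS];		// pending-source bitmap
 *
 *	int first_pending(void)
 *	{
 *	  int id;
 *	  for (id = 0; id < NCHUNKS * CHUNK_BITS; id += CHUNK_BITS) {
 *	      if (map[id / CHUNK_BITS] == 0) continue;	// skip empty chunk
 *	      while (!(map[id / CHUNK_BITS] & (1u << (id % CHUNK_BITS))))
 *	          id++;			// nonzero chunk: find the set bit
 *	      return id;		// id of first pending source
 *	  }
 *	  return -1;			// nothing pending anywhere
 *	}
 *
 * The SMP variant above must additionally skip sources whose pages VM is
 * currently changing, which is why it cannot simply return the first set
 * bit it finds.
 */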
2011-10-31 16:21:08 +01:00
|
|
|
/*===========================================================================*
|
|
|
|
* has_pending_notify *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
int has_pending_notify(struct proc * caller, int src_p)
|
2011-10-31 16:21:08 +01:00
|
|
|
{
|
|
|
|
sys_map_t * map = &priv(caller)->s_notify_pending;
|
|
|
|
return has_pending(map, src_p, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*===========================================================================*
|
|
|
|
* has_pending_asend *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
int has_pending_asend(struct proc * caller, int src_p)
|
2011-10-31 16:21:08 +01:00
|
|
|
{
|
|
|
|
sys_map_t * map = &priv(caller)->s_asyn_pending;
|
|
|
|
return has_pending(map, src_p, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*===========================================================================*
|
|
|
|
* unset_notify_pending *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
void unset_notify_pending(struct proc * caller, int src_p)
|
2011-10-31 16:21:08 +01:00
|
|
|
{
|
|
|
|
sys_map_t * map = &priv(caller)->s_notify_pending;
|
|
|
|
unset_sys_bit(*map, src_p);
|
|
|
|
}
|
|
|
|
|
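/* Usage sketch, not part of the kernel: the three wrappers above only pick
 * the per-privilege bitmap to operate on. mini_receive() below uses them in
 * exactly this pattern (src_id is a privilege id, not a process number):
 *
 *	int src_id = has_pending_notify(caller_ptr, src_p);
 *	if (src_id != NULL_PRIV_ID)
 *	      unset_notify_pending(caller_ptr, src_id);	// consume the bit
 */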
2005-04-21 16:53:53 +02:00
|
|
|
/*===========================================================================*
|
|
|
|
* mini_send *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
int mini_send(
|
2010-06-02 23:51:32 +02:00
|
|
|
register struct proc *caller_ptr, /* who is trying to send a message? */
|
|
|
|
endpoint_t dst_e, /* to whom is message being sent? */
|
|
|
|
message *m_ptr, /* pointer to message buffer */
|
|
|
|
const int flags
|
|
|
|
)
|
2005-04-21 16:53:53 +02:00
|
|
|
{
|
2005-05-24 12:06:17 +02:00
|
|
|
/* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
|
|
|
|
* for this message, copy the message to it and unblock 'dst'. If 'dst' is
|
2005-04-21 16:53:53 +02:00
|
|
|
* not waiting at all, or is waiting for another source, queue 'caller_ptr'.
|
|
|
|
*/
|
2006-03-03 11:00:02 +01:00
|
|
|
register struct proc *dst_ptr;
|
2005-05-26 15:17:57 +02:00
|
|
|
register struct proc **xpp;
|
2006-03-03 11:00:02 +01:00
|
|
|
int dst_p;
|
|
|
|
dst_p = _ENDPOINT_P(dst_e);
|
|
|
|
dst_ptr = proc_addr(dst_p);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
2009-11-10 10:11:13 +01:00
|
|
|
if (RTS_ISSET(dst_ptr, RTS_NO_ENDPOINT))
|
2008-02-22 13:36:46 +01:00
|
|
|
{
|
2010-03-03 16:32:26 +01:00
|
|
|
return EDEADSRCDST;
|
2008-02-22 13:36:46 +01:00
|
|
|
}
|
2006-03-15 13:01:59 +01:00
|
|
|
|
2005-05-26 15:17:57 +02:00
|
|
|
/* Check if 'dst' is blocked waiting for this message. The destination's
|
2009-11-10 10:11:13 +01:00
|
|
|
* RTS_SENDING flag may be set when its SENDREC call blocked while sending.
|
2005-05-26 15:17:57 +02:00
|
|
|
*/
|
2008-11-19 13:26:10 +01:00
|
|
|
if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint)) {
|
2010-03-23 01:09:11 +01:00
|
|
|
int call;
|
2005-04-21 16:53:53 +02:00
|
|
|
/* Destination is indeed waiting for this message. */
|
2010-03-10 14:00:05 +01:00
|
|
|
assert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
|
2010-03-29 13:16:37 +02:00
|
|
|
|
|
|
|
if (!(flags & FROM_KERNEL)) {
|
2012-05-07 16:03:35 +02:00
|
|
|
if(copy_msg_from_user(m_ptr, &dst_ptr->p_delivermsg))
|
2010-03-29 13:16:37 +02:00
|
|
|
return EFAULT;
|
2010-03-29 13:25:01 +02:00
|
|
|
} else {
|
2010-03-29 13:16:37 +02:00
|
|
|
dst_ptr->p_delivermsg = *m_ptr;
|
2010-04-26 16:43:59 +02:00
|
|
|
IPC_STATUS_ADD_FLAGS(dst_ptr, IPC_FLG_MSG_FROM_KERNEL);
|
2010-03-29 13:25:01 +02:00
|
|
|
}
|
|
|
|
|
2010-03-29 13:16:37 +02:00
|
|
|
dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
|
|
|
|
dst_ptr->p_misc_flags |= MF_DELIVERMSG;
|
|
|
|
|
2010-03-23 01:09:11 +01:00
|
|
|
call = (caller_ptr->p_misc_flags & MF_REPLY_PEND ? SENDREC
|
|
|
|
: (flags & NON_BLOCKING ? SENDNB : SEND));
|
2010-04-26 16:43:59 +02:00
|
|
|
IPC_STATUS_ADD_CALL(dst_ptr, call);
|
2010-06-28 10:32:49 +02:00
|
|
|
|
|
|
|
if (dst_ptr->p_misc_flags & MF_REPLY_PEND)
|
|
|
|
dst_ptr->p_misc_flags &= ~MF_REPLY_PEND;
|
|
|
|
|
2009-11-10 10:11:13 +01:00
|
|
|
RTS_UNSET(dst_ptr, RTS_RECEIVING);
|
2010-06-24 15:31:40 +02:00
|
|
|
|
2011-02-08 14:54:33 +01:00
|
|
|
#if DEBUG_IPC_HOOK
|
|
|
|
hook_ipc_msgsend(&dst_ptr->p_delivermsg, caller_ptr, dst_ptr);
|
|
|
|
hook_ipc_msgrecv(&dst_ptr->p_delivermsg, caller_ptr, dst_ptr);
|
2010-06-24 15:31:40 +02:00
|
|
|
#endif
|
2008-11-19 13:26:10 +01:00
|
|
|
} else {
|
|
|
|
if(flags & NON_BLOCKING) {
|
|
|
|
return(ENOTREADY);
|
|
|
|
}
|
|
|
|
|
2009-12-17 00:32:08 +01:00
|
|
|
/* Check for a possible deadlock before actually blocking. */
|
2010-07-28 16:14:06 +02:00
|
|
|
if (deadlock(SEND, caller_ptr, dst_e)) {
|
2009-12-17 00:32:08 +01:00
|
|
|
return(ELOCKED);
|
|
|
|
}
|
|
|
|
|
2005-10-07 15:23:18 +02:00
|
|
|
/* Destination is not waiting. Block and dequeue caller. */
|
2010-03-29 13:16:37 +02:00
|
|
|
if (!(flags & FROM_KERNEL)) {
|
2012-05-07 16:03:35 +02:00
|
|
|
if(copy_msg_from_user(m_ptr, &caller_ptr->p_sendmsg))
|
2010-03-29 13:16:37 +02:00
|
|
|
return EFAULT;
|
2010-03-29 13:25:01 +02:00
|
|
|
} else {
|
2010-03-29 13:16:37 +02:00
|
|
|
caller_ptr->p_sendmsg = *m_ptr;
|
2010-03-29 13:25:01 +02:00
|
|
|
/*
|
|
|
|
	 * we need to remember that this message is from the kernel so we
|
|
|
|
* can set the delivery status flags when the message is
|
|
|
|
* actually delivered
|
|
|
|
*/
|
|
|
|
caller_ptr->p_misc_flags |= MF_SENDING_FROM_KERNEL;
|
|
|
|
}
|
2009-09-21 16:31:52 +02:00
|
|
|
|
2009-11-10 10:11:13 +01:00
|
|
|
RTS_SET(caller_ptr, RTS_SENDING);
|
2006-03-03 11:00:02 +01:00
|
|
|
caller_ptr->p_sendto_e = dst_e;
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
      /* Process is now blocked. Put it on the destination's queue. */
|
2010-06-23 12:36:19 +02:00
|
|
|
assert(caller_ptr->p_q_link == NULL);
|
2005-05-30 13:05:42 +02:00
|
|
|
xpp = &dst_ptr->p_caller_q; /* find end of list */
|
2010-03-28 11:54:32 +02:00
|
|
|
while (*xpp) xpp = &(*xpp)->p_q_link;
|
2005-05-30 13:05:42 +02:00
|
|
|
*xpp = caller_ptr; /* add caller to end */
|
2010-06-24 15:31:40 +02:00
|
|
|
|
2011-02-08 14:54:33 +01:00
|
|
|
#if DEBUG_IPC_HOOK
|
|
|
|
hook_ipc_msgsend(&caller_ptr->p_sendmsg, caller_ptr, dst_ptr);
|
2010-06-24 15:31:40 +02:00
|
|
|
#endif
|
2005-04-21 16:53:53 +02:00
|
|
|
}
|
|
|
|
return(OK);
|
|
|
|
}
|
|
|
|
|
|
|
|
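/* Illustrative sketch, not part of the kernel: mini_send() above has three
 * outcomes. A condensed, hypothetical rendering of the control flow, where
 * deliver() and append() stand in for the inline copy and enqueue code:
 *
 *	if (WILLRECEIVE(dst, src->p_endpoint)) {
 *	      deliver(dst, m_ptr);		// direct hand-over
 *	      RTS_UNSET(dst, RTS_RECEIVING);	// receiver runnable again
 *	} else if (flags & NON_BLOCKING) {
 *	      return ENOTREADY;			// refuse to block
 *	} else {				// block and enqueue sender
 *	      RTS_SET(src, RTS_SENDING);
 *	      append(&dst->p_caller_q, src);	// uses pointer pointers
 *	}
 */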
/*===========================================================================*
|
2005-06-07 14:34:25 +02:00
|
|
|
* mini_receive *
|
2005-04-21 16:53:53 +02:00
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
static int mini_receive(struct proc * caller_ptr,
|
2010-06-26 23:13:36 +02:00
|
|
|
endpoint_t src_e, /* which message source is wanted */
|
2010-06-11 10:16:10 +02:00
|
|
|
message * m_buff_usr, /* pointer to message buffer */
|
|
|
|
const int flags)
|
2005-04-21 16:53:53 +02:00
|
|
|
{
|
2005-05-30 13:05:42 +02:00
|
|
|
/* A process or task wants to get a message. If a message is already queued,
|
2005-04-21 16:53:53 +02:00
|
|
|
 * acquire it and unblock the sender. If no message from the desired source
|
2008-11-19 13:26:10 +01:00
|
|
|
 * is available, block the caller.
|
2005-04-21 16:53:53 +02:00
|
|
|
*/
|
2005-05-26 15:17:57 +02:00
|
|
|
register struct proc **xpp;
|
2011-06-09 16:09:13 +02:00
|
|
|
int r, src_id, src_proc_nr, src_p;
|
2009-09-21 16:31:52 +02:00
|
|
|
|
2010-03-10 14:00:05 +01:00
|
|
|
assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
|
2009-09-21 16:31:52 +02:00
|
|
|
|
|
|
|
/* This is where we want our message. */
|
2010-06-11 10:16:10 +02:00
|
|
|
caller_ptr->p_delivermsg_vir = (vir_bytes) m_buff_usr;
|
2006-03-03 11:00:02 +01:00
|
|
|
|
|
|
|
if(src_e == ANY) src_p = ANY;
|
2006-03-15 13:01:59 +01:00
|
|
|
else
|
|
|
|
{
|
|
|
|
okendpt(src_e, &src_p);
|
2009-11-10 10:11:13 +01:00
|
|
|
if (RTS_ISSET(proc_addr(src_p), RTS_NO_ENDPOINT))
|
2008-02-22 13:36:46 +01:00
|
|
|
{
|
2010-03-03 16:32:26 +01:00
|
|
|
return EDEADSRCDST;
|
2008-02-22 13:36:46 +01:00
|
|
|
}
|
2006-03-15 13:01:59 +01:00
|
|
|
}
|
|
|
|
|
2005-04-21 16:53:53 +02:00
|
|
|
|
2009-11-10 10:11:13 +01:00
|
|
|
/* Check to see if a message from desired source is already available. The
|
|
|
|
* caller's RTS_SENDING flag may be set if SENDREC couldn't send. If it is
|
2005-05-26 15:17:57 +02:00
|
|
|
* set, the process should be blocked.
|
|
|
|
*/
|
2009-11-10 10:11:13 +01:00
|
|
|
if (!RTS_ISSET(caller_ptr, RTS_SENDING)) {
|
2005-04-21 16:53:53 +02:00
|
|
|
|
2005-05-30 13:05:42 +02:00
|
|
|
/* Check if there are pending notifications, except for SENDREC. */
|
2009-09-21 16:31:52 +02:00
|
|
|
if (! (caller_ptr->p_misc_flags & MF_REPLY_PEND)) {
|
2011-01-07 18:04:43 +01:00
|
|
|
|
2011-04-08 17:03:33 +02:00
|
|
|
/* Check for pending notifications */
|
2011-10-31 16:21:08 +01:00
|
|
|
if ((src_id = has_pending_notify(caller_ptr, src_p)) != NULL_PRIV_ID) {
|
2011-01-07 18:04:43 +01:00
|
|
|
endpoint_t hisep;
|
|
|
|
|
2005-07-14 17:12:12 +02:00
|
|
|
src_proc_nr = id_to_nr(src_id); /* get source proc */
|
2005-10-20 22:59:02 +02:00
|
|
|
#if DEBUG_ENABLE_IPC_WARNINGS
|
|
|
|
if(src_proc_nr == NONE) {
|
2010-03-03 16:45:01 +01:00
|
|
|
printf("mini_receive: sending notify from NONE\n");
|
2005-10-20 22:59:02 +02:00
|
|
|
}
|
|
|
|
#endif
|
2012-08-15 13:12:11 +02:00
|
|
|
assert(src_proc_nr != NONE);
|
2011-10-31 16:21:08 +01:00
|
|
|
unset_notify_pending(caller_ptr, src_id); /* no longer pending */
|
2005-07-14 17:12:12 +02:00
|
|
|
|
|
|
|
/* Found a suitable source, deliver the notification message. */
|
2009-09-21 16:31:52 +02:00
|
|
|
hisep = proc_addr(src_proc_nr)->p_endpoint;
|
2010-03-10 14:00:05 +01:00
|
|
|
assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
|
|
|
|
assert(src_e == ANY || hisep == src_e);
|
2010-03-29 13:16:37 +02:00
|
|
|
|
|
|
|
/* assemble message */
|
|
|
|
BuildNotifyMessage(&caller_ptr->p_delivermsg, src_proc_nr, caller_ptr);
|
|
|
|
caller_ptr->p_delivermsg.m_source = hisep;
|
|
|
|
caller_ptr->p_misc_flags |= MF_DELIVERMSG;
|
|
|
|
|
2010-04-26 16:43:59 +02:00
|
|
|
IPC_STATUS_ADD_CALL(caller_ptr, NOTIFY);
|
2010-03-29 13:16:37 +02:00
|
|
|
|
2010-06-28 10:32:49 +02:00
|
|
|
goto receive_done;
|
2005-07-14 17:12:12 +02:00
|
|
|
}
|
2005-05-19 16:05:51 +02:00
|
|
|
}
|
2005-07-14 17:12:12 +02:00
|
|
|
|
2011-04-08 17:03:33 +02:00
|
|
|
/* Check for pending asynchronous messages */
|
2011-10-31 16:21:08 +01:00
|
|
|
if (has_pending_asend(caller_ptr, src_p) != NULL_PRIV_ID) {
|
2011-04-08 17:03:33 +02:00
|
|
|
if (src_p != ANY)
|
|
|
|
r = try_one(proc_addr(src_p), caller_ptr);
|
|
|
|
else
|
|
|
|
r = try_async(caller_ptr);
|
2010-03-27 01:09:22 +01:00
|
|
|
|
|
|
|
if (r == OK) {
|
2011-04-08 17:03:33 +02:00
|
|
|
IPC_STATUS_ADD_CALL(caller_ptr, SENDA);
|
|
|
|
goto receive_done;
|
|
|
|
}
|
2010-03-27 01:09:22 +01:00
|
|
|
}
|
|
|
|
|
2005-07-14 17:12:12 +02:00
|
|
|
/* Check caller queue. Use pointer pointers to keep code simple. */
|
|
|
|
xpp = &caller_ptr->p_caller_q;
|
2010-03-28 11:54:32 +02:00
|
|
|
while (*xpp) {
|
2010-06-23 12:36:19 +02:00
|
|
|
struct proc * sender = *xpp;
|
|
|
|
|
|
|
|
if (src_e == ANY || src_p == proc_nr(sender)) {
|
2010-03-23 01:09:11 +01:00
|
|
|
int call;
|
2010-06-23 12:36:19 +02:00
|
|
|
assert(!RTS_ISSET(sender, RTS_SLOT_FREE));
|
|
|
|
assert(!RTS_ISSET(sender, RTS_NO_ENDPOINT));
|
2006-03-08 13:06:33 +01:00
|
|
|
|
2005-07-14 17:12:12 +02:00
|
|
|
/* Found acceptable message. Copy it and update status. */
|
2010-03-10 14:00:05 +01:00
|
|
|
assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
|
2010-06-23 12:36:19 +02:00
|
|
|
caller_ptr->p_delivermsg = sender->p_sendmsg;
|
|
|
|
caller_ptr->p_delivermsg.m_source = sender->p_endpoint;
|
2010-03-29 13:16:37 +02:00
|
|
|
caller_ptr->p_misc_flags |= MF_DELIVERMSG;
|
2010-06-23 12:36:19 +02:00
|
|
|
RTS_UNSET(sender, RTS_SENDING);
|
2010-03-29 13:16:37 +02:00
|
|
|
|
2010-06-23 12:36:19 +02:00
|
|
|
call = (sender->p_misc_flags & MF_REPLY_PEND ? SENDREC : SEND);
|
2010-04-26 16:43:59 +02:00
|
|
|
IPC_STATUS_ADD_CALL(caller_ptr, call);
|
2010-03-29 13:25:01 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
	       * if the message is originally from the kernel on behalf of this
|
|
|
|
	       * process, we must set the status flags accordingly
|
|
|
|
*/
|
2010-06-23 12:36:19 +02:00
|
|
|
if (sender->p_misc_flags & MF_SENDING_FROM_KERNEL) {
|
2010-04-26 16:43:59 +02:00
|
|
|
IPC_STATUS_ADD_FLAGS(caller_ptr, IPC_FLG_MSG_FROM_KERNEL);
|
2010-03-29 13:25:01 +02:00
|
|
|
		  /* we can clear the flag now; it is not needed anymore */
|
2010-06-23 12:36:19 +02:00
|
|
|
sender->p_misc_flags &= ~MF_SENDING_FROM_KERNEL;
|
2010-03-29 13:25:01 +02:00
|
|
|
}
|
2010-06-23 12:36:19 +02:00
|
|
|
if (sender->p_misc_flags & MF_SIG_DELAY)
|
|
|
|
sig_delay_done(sender);
|
2010-03-29 13:16:37 +02:00
|
|
|
|
2011-02-08 14:54:33 +01:00
|
|
|
#if DEBUG_IPC_HOOK
|
|
|
|
hook_ipc_msgrecv(&caller_ptr->p_delivermsg, *xpp, caller_ptr);
|
2010-06-24 15:31:40 +02:00
|
|
|
#endif
|
|
|
|
|
2010-06-23 12:36:19 +02:00
|
|
|
*xpp = sender->p_q_link; /* remove from queue */
|
|
|
|
sender->p_q_link = NULL;
|
2010-06-28 10:32:49 +02:00
|
|
|
goto receive_done;
|
2005-07-14 17:12:12 +02:00
|
|
|
}
|
2010-06-23 12:36:19 +02:00
|
|
|
xpp = &sender->p_q_link; /* proceed to next */
|
2005-07-14 17:12:12 +02:00
|
|
|
}
|
2005-04-21 16:53:53 +02:00
|
|
|
}
|
|
|
|
|
2005-05-27 14:44:14 +02:00
|
|
|
/* No suitable message is available or the caller couldn't send in SENDREC.
|
2005-05-26 15:17:57 +02:00
|
|
|
* Block the process trying to receive, unless the flags tell otherwise.
|
2005-04-21 16:53:53 +02:00
|
|
|
*/
|
2005-05-27 14:44:14 +02:00
|
|
|
if ( ! (flags & NON_BLOCKING)) {
|
2009-12-17 00:32:08 +01:00
|
|
|
/* Check for a possible deadlock before actually blocking. */
|
2010-07-28 16:14:06 +02:00
|
|
|
if (deadlock(RECEIVE, caller_ptr, src_e)) {
|
2009-12-17 00:32:08 +01:00
|
|
|
return(ELOCKED);
|
|
|
|
}
|
|
|
|
|
2006-03-03 11:00:02 +01:00
|
|
|
caller_ptr->p_getfrom_e = src_e;
|
2009-11-10 10:11:13 +01:00
|
|
|
RTS_SET(caller_ptr, RTS_RECEIVING);
|
2005-04-21 16:53:53 +02:00
|
|
|
return(OK);
|
|
|
|
} else {
|
2008-02-22 13:36:46 +01:00
|
|
|
return(ENOTREADY);
|
2005-04-21 16:53:53 +02:00
|
|
|
}
|
2010-06-28 10:32:49 +02:00
|
|
|
|
|
|
|
receive_done:
|
|
|
|
if (caller_ptr->p_misc_flags & MF_REPLY_PEND)
|
|
|
|
caller_ptr->p_misc_flags &= ~MF_REPLY_PEND;
|
|
|
|
return OK;
|
2005-04-21 16:53:53 +02:00
|
|
|
}
|
|
|
|
|
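/* Illustrative sketch, not part of the kernel: the caller-queue scan in
 * mini_receive() above uses pointer pointers not only to append but also to
 * unlink, so removing the head needs no special case. acceptable() is a
 * hypothetical stand-in for the endpoint match:
 *
 *	struct proc **xpp = &caller_ptr->p_caller_q;	// head pointer
 *	while (*xpp) {					// walk the queue
 *	      struct proc *sender = *xpp;
 *	      if (acceptable(sender)) {
 *	          *xpp = sender->p_q_link;	// unlink, head or middle alike
 *	          sender->p_q_link = NULL;
 *	          break;
 *	      }
 *	      xpp = &sender->p_q_link;		// advance to next link field
 *	}
 */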
2005-07-14 17:12:12 +02:00
|
|
|
/*===========================================================================*
|
2005-07-27 16:32:16 +02:00
|
|
|
* mini_notify *
|
2005-07-14 17:12:12 +02:00
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
int mini_notify(
|
2010-03-27 15:31:00 +01:00
|
|
|
const struct proc *caller_ptr, /* sender of the notification */
|
|
|
|
endpoint_t dst_e /* which process to notify */
|
|
|
|
)
|
2005-07-14 17:12:12 +02:00
|
|
|
{
|
2009-09-21 16:31:52 +02:00
|
|
|
register struct proc *dst_ptr;
|
2005-07-14 17:12:12 +02:00
|
|
|
int src_id; /* source id for late delivery */
|
Primary goal for these changes is:
- no longer have kernel have its own page table that is loaded
on every kernel entry (trap, interrupt, exception). the primary
purpose is to reduce the number of required reloads.
Result:
- kernel can only access memory of process that was running when
kernel was entered
- kernel must be mapped into every process page table, so traps to
kernel keep working
Problem:
- kernel must often access memory of arbitrary processes (e.g. send
arbitrary processes messages); this can't happen directly any more;
usually because that process' page table isn't loaded at all, sometimes
because that memory isn't mapped in at all, sometimes because it isn't
mapped in read-write.
So:
- kernel must be able to map in memory of any process, in its own
address space.
Implementation:
- VM and kernel share a range of memory in which addresses of
all page tables of all processes are available. This has two purposes:
. Kernel has to know what data to copy in order to map in a range
. Kernel has to know where to write the data in order to map it in
That last point is because kernel has to write in the currently loaded
page table.
- Processes and kernel are separated through segments; kernel segments
haven't changed.
- The kernel keeps the process whose page table is currently loaded
in 'ptproc.'
- If it wants to map in a range of memory, it writes the value of the
page directory entry for that range into the page directory entry
in the currently loaded map. There is a slot reserved for such
purposes. The kernel can then access this memory directly.
- In order to do this, its segment has been increased (and the
segments of processes start where it ends).
- In the pagefault handler, detect if the kernel is doing
'trappable' memory access (i.e. a pagefault isn't a fatal
error) and if so,
- set the saved instruction pointer to phys_copy_fault,
breaking out of phys_copy
- set the saved eax register to the address of the page
fault, both for sanity checking and for checking in
which of the two ranges that phys_copy was called
with the fault occured
- Some boot-time processes do not have their own page table,
and are mapped in with the kernel, and separated with
segments. The kernel detects this using HASPT. If such a
process has to be scheduled, any page table will work and
no page table switch is done.
Major changes in kernel are
- When accessing user processes memory, kernel no longer
explicitly checks before it does so if that memory is OK.
It simply makes the mapping (if necessary), tries to do the
operation, and traps the pagefault if that memory isn't present;
if that happens, the copy function returns EFAULT.
So all of the CHECKRANGE_OR_SUSPEND macros are gone.
- Kernel no longer has to copy/read and parse page tables.
- A message copying optimisation: when messages are copied, and
the recipient isn't mapped in, they are copied into a buffer
in the kernel. This is done in QueueMess. The next time
the recipient is scheduled, this message is copied into
its memory. This happens in schedcheck().
This eliminates the mapping/copying step for messages, and makes
it easier to deliver messages. This eliminates soft_notify.
- Kernel no longer creates a page table at all, so the vm_setbuf
and pagetable writing in memory.c is gone.
Minor changes in kernel are
- ipc_stats thrown out, wasn't used
- misc flags all renamed to MF_*
- NOREC_* macros to enter and leave functions that should not
be called recursively; just sanity checks really
- code to fully decode segment selectors and descriptors
to print on exceptions
- lots of vmassert()s added, only executed if DEBUG_VMASSERT is 1
2009-09-21 16:31:52 +02:00
|
|
|
int dst_p;
|
|
|
|
|
|
|
|
if (!isokendpt(dst_e, &dst_p)) {
|
|
|
|
util_stacktrace();
|
2010-03-03 16:45:01 +01:00
|
|
|
printf("mini_notify: bogus endpoint %d\n", dst_e);
|
Primary goal for these changes is:
- no longer have kernel have its own page table that is loaded
on every kernel entry (trap, interrupt, exception). the primary
purpose is to reduce the number of required reloads.
Result:
- kernel can only access memory of process that was running when
kernel was entered
- kernel must be mapped into every process page table, so traps to
kernel keep working
Problem:
- kernel must often access memory of arbitrary processes (e.g. send
arbitrary processes messages); this can't happen directly any more;
usually because that process' page table isn't loaded at all, sometimes
because that memory isn't mapped in at all, sometimes because it isn't
mapped in read-write.
So:
- kernel must be able to map in memory of any process, in its own
address space.
Implementation:
- VM and kernel share a range of memory in which addresses of
all page tables of all processes are available. This has two purposes:
. Kernel has to know what data to copy in order to map in a range
. Kernel has to know where to write the data in order to map it in
That last point is because kernel has to write in the currently loaded
page table.
- Processes and kernel are separated through segments; kernel segments
haven't changed.
- The kernel keeps the process whose page table is currently loaded
in 'ptproc.'
- If it wants to map in a range of memory, it writes the value of the
page directory entry for that range into the page directory entry
in the currently loaded map. There is a slot reserved for such
purposes. The kernel can then access this memory directly.
- In order to do this, its segment has been increased (and the
segments of processes start where it ends).
- In the pagefault handler, detect if the kernel is doing
'trappable' memory access (i.e. a pagefault isn't a fatal
error) and if so,
- set the saved instruction pointer to phys_copy_fault,
breaking out of phys_copy
- set the saved eax register to the address of the page
fault, both for sanity checking and for checking in
which of the two ranges that phys_copy was called
with the fault occured
- Some boot-time processes do not have their own page table,
and are mapped in with the kernel, and separated with
segments. The kernel detects this using HASPT. If such a
process has to be scheduled, any page table will work and
no page table switch is done.
Major changes in kernel are
- When accessing user processes memory, kernel no longer
explicitly checks before it does so if that memory is OK.
It simply makes the mapping (if necessary), tries to do the
operation, and traps the pagefault if that memory isn't present;
if that happens, the copy function returns EFAULT.
So all of the CHECKRANGE_OR_SUSPEND macros are gone.
- Kernel no longer has to copy/read and parse page tables.
- A message copying optimisation: when messages are copied, and
the recipient isn't mapped in, they are copied into a buffer
in the kernel. This is done in QueueMess. The next time
the recipient is scheduled, this message is copied into
its memory. This happens in schedcheck().
This eliminates the mapping/copying step for messages, and makes
it easier to deliver messages. This eliminates soft_notify.
- Kernel no longer creates a page table at all, so the vm_setbuf
and pagetable writing in memory.c is gone.
Minor changes in kernel are
- ipc_stats thrown out, wasn't used
- misc flags all renamed to MF_*
- NOREC_* macros to enter and leave functions that should not
be called recursively; just sanity checks really
- code to fully decode segment selectors and descriptors
to print on exceptions
- lots of vmassert()s added, only executed if DEBUG_VMASSERT is 1
2009-09-21 16:31:52 +02:00
|
|
|
return EDEADSRCDST;
|
|
|
|
}
|
|
|
|
|
|
|
|
dst_ptr = proc_addr(dst_p);
|
2005-07-14 17:12:12 +02:00
|
|
|
|
|
|
|
  /* Check to see if target is blocked waiting for this message. A process
   * can be both sending and receiving during a SENDREC system call.
   */
  if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
      !(dst_ptr->p_misc_flags & MF_REPLY_PEND)) {
	/* Destination is indeed waiting for a message. Assemble a notification
	 * message and deliver it. Copy from pseudo-source HARDWARE, since the
	 * message is in the kernel's address space.
	 */
	assert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));

	BuildNotifyMessage(&dst_ptr->p_delivermsg, proc_nr(caller_ptr), dst_ptr);
	dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
	dst_ptr->p_misc_flags |= MF_DELIVERMSG;

	IPC_STATUS_ADD_CALL(dst_ptr, NOTIFY);
	RTS_UNSET(dst_ptr, RTS_RECEIVING);

	return(OK);
  }

  /* Destination is not ready to receive the notification. Add it to the
   * bit map with pending notifications. Note the indirection: the privilege
   * id instead of the process number is used in the pending bit map.
   */
  src_id = priv(caller_ptr)->s_id;
  set_sys_bit(priv(dst_ptr)->s_notify_pending, src_id);
  return(OK);
}
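
/* Illustrative sketch, not part of the original file: the pending bit set
 * above is consumed when the destination finally blocks in receive. Assuming
 * the same privilege-table iteration style as try_async() below, the receive
 * path can recover every pending sender from the bit map alone:
 *
 *	struct priv *sp;
 *	for (sp = BEG_PRIV_ADDR; sp < END_PRIV_ADDR; ++sp) {
 *		if (sp->s_proc_nr == NONE) continue;
 *		if (!get_sys_bit(priv(dst_ptr)->s_notify_pending, sp->s_id))
 *			continue;
 *		// sp->s_proc_nr identifies a process with a notification
 *		// pending; a notify message can now be built as above and
 *		// the bit cleared with unset_sys_bit().
 *	}
 */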

#define ASCOMPLAIN(caller, entry, field)	\
	printf("kernel:%s:%d: asyn failed for %s in %s "	\
	"(%d/%d, tab 0x%lx)\n",__FILE__,__LINE__,	\
	field, caller->p_name, entry, priv(caller)->s_asynsize, priv(caller)->s_asyntab)

#define A_RETR_FLD(entry, field)	\
  if(data_copy(caller_ptr->p_endpoint,	\
	table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
	KERNEL, (vir_bytes) &tabent.field,	\
	sizeof(tabent.field)) != OK) {	\
		ASCOMPLAIN(caller_ptr, entry, #field);	\
		r = EFAULT;	\
		goto asyn_error;	\
	}

#define A_RETR(entry) do {	\
  if (data_copy(	\
	caller_ptr->p_endpoint, table_v + (entry)*sizeof(asynmsg_t),	\
	KERNEL, (vir_bytes) &tabent,	\
	sizeof(tabent)) != OK) {	\
		ASCOMPLAIN(caller_ptr, entry, "message entry");	\
		r = EFAULT;	\
		goto asyn_error;	\
  }	\
  } while(0)

#define A_INSRT_FLD(entry, field)	\
  if(data_copy(KERNEL, (vir_bytes) &tabent.field,	\
	caller_ptr->p_endpoint,	\
	table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
	sizeof(tabent.field)) != OK) {	\
		ASCOMPLAIN(caller_ptr, entry, #field);	\
		r = EFAULT;	\
		goto asyn_error;	\
	}

#define A_INSRT(entry) do {	\
  if (data_copy(KERNEL, (vir_bytes) &tabent,	\
	caller_ptr->p_endpoint, table_v + (entry)*sizeof(asynmsg_t),	\
	sizeof(tabent)) != OK) {	\
		ASCOMPLAIN(caller_ptr, entry, "message entry");	\
		r = EFAULT;	\
		goto asyn_error;	\
  }	\
  } while(0)
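
/* Usage sketch (illustrative only): in the scan loops below, 'caller_ptr',
 * 'table_v', 'tabent' and 'r' are all in scope, so one table entry makes a
 * round trip through the kernel as:
 *
 *	A_RETR(i);		// copy caller's table[i] into tabent
 *	...inspect tabent.flags and tabent.dst, try to deliver tabent.msg...
 *	tabent.result = r;
 *	tabent.flags |= AMF_DONE;
 *	A_INSRT(i);		// copy tabent back into caller's table[i]
 *
 * Either macro jumps to the enclosing function's asyn_error label when the
 * copy to or from the caller's address space fails.
 */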

/*===========================================================================*
 *				try_deliver_senda			     *
 *===========================================================================*/
int try_deliver_senda(struct proc *caller_ptr,
			asynmsg_t *table,
			size_t size)
{
  int r, dst_p, done, do_notify;
  unsigned int i;
  unsigned flags;
  endpoint_t dst;
  struct proc *dst_ptr;
  struct priv *privp;
  asynmsg_t tabent;
  const vir_bytes table_v = (vir_bytes) table;

  privp = priv(caller_ptr);

  /* Clear table */
  privp->s_asyntab = -1;
  privp->s_asynsize = 0;

  if (size == 0) return(OK);	/* Nothing to do, just return */

  /* Scan the table */
  do_notify = FALSE;
  done = TRUE;

  /* Limit size to something reasonable. An arbitrary choice is 16
   * times the number of process table entries.
   *
   * (this check has been duplicated in sys_call but is left here
   * as a sanity check)
   */
  if (size > 16*(NR_TASKS + NR_PROCS)) {
	r = EDOM;
	return r;
  }

  for (i = 0; i < size; i++) {
	/* Process each entry in the table and store the result in the table.
	 * If we're done handling a message, copy the result to the sender. */

	dst = NONE;
	/* Copy message to kernel */
	A_RETR(i);
	flags = tabent.flags;
	dst = tabent.dst;

	if (flags == 0) continue;	/* Skip empty entries */

	/* 'flags' field must contain only valid bits */
	if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY|AMF_NOTIFY_ERR)) {
		r = EINVAL;
		goto asyn_error;
	}
	if (!(flags & AMF_VALID)) {	/* Must contain message */
		r = EINVAL;
		goto asyn_error;
	}
	if (flags & AMF_DONE) continue;	/* Already done processing */

	r = OK;
	if (!isokendpt(tabent.dst, &dst_p))
		r = EDEADSRCDST;	/* Bad destination, report the error */
	else if (iskerneln(dst_p))
		r = ECALLDENIED;	/* Asyn sends to the kernel are not allowed */
	else if (!may_send_to(caller_ptr, dst_p))
		r = ECALLDENIED;	/* Send denied by IPC mask */
	else	/* r == OK */
		dst_ptr = proc_addr(dst_p);

	/* XXX: RTS_NO_ENDPOINT should be removed */
	if (r == OK && RTS_ISSET(dst_ptr, RTS_NO_ENDPOINT)) {
		r = EDEADSRCDST;
	}

	/* Check if 'dst' is blocked waiting for this message.
	 * If AMF_NOREPLY is set, do not satisfy the receiving part of
	 * a SENDREC.
	 */
	if (r == OK && WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
	    (!(flags & AMF_NOREPLY) || !(dst_ptr->p_misc_flags & MF_REPLY_PEND))) {
		/* Destination is indeed waiting for this message. */
		dst_ptr->p_delivermsg = tabent.msg;
		dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
		dst_ptr->p_misc_flags |= MF_DELIVERMSG;
		IPC_STATUS_ADD_CALL(dst_ptr, SENDA);
		RTS_UNSET(dst_ptr, RTS_RECEIVING);
#if DEBUG_IPC_HOOK
		hook_ipc_msgrecv(&dst_ptr->p_delivermsg, caller_ptr, dst_ptr);
#endif
	} else if (r == OK) {
		/* Inform receiver that something is pending */
		set_sys_bit(priv(dst_ptr)->s_asyn_pending,
			    priv(caller_ptr)->s_id);
		done = FALSE;
		continue;
	}

	/* Store results */
	tabent.result = r;
	tabent.flags = flags | AMF_DONE;
	if (flags & AMF_NOTIFY)
		do_notify = TRUE;
	else if (r != OK && (flags & AMF_NOTIFY_ERR))
		do_notify = TRUE;
	A_INSRT(i);	/* Copy results to caller */
	continue;

asyn_error:
	if (dst != NONE)
		printf("KERNEL senda error %d to %d\n", r, dst);
	else
		printf("KERNEL senda error %d\n", r);
  }

  if (do_notify)
	mini_notify(proc_addr(ASYNCM), caller_ptr->p_endpoint);

  if (!done) {
	privp->s_asyntab = (vir_bytes) table;
	privp->s_asynsize = size;
  }

  return(OK);
}
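
/* Illustrative sketch, assuming the senda() library wrapper that system
 * processes use to reach mini_senda(); the field names follow the asynmsg_t
 * usage above:
 *
 *	static asynmsg_t msgtable[1];
 *	msgtable[0].dst   = dst_e;		// destination endpoint
 *	msgtable[0].msg   = m;			// message payload to deliver
 *	msgtable[0].flags = AMF_VALID;		// entry holds a valid message
 *	senda(msgtable, 1);
 *
 * Once delivery has been attempted, the kernel sets AMF_DONE in 'flags' and
 * writes the outcome into 'result', as done at the end of the loop above.
 */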

/*===========================================================================*
 *				mini_senda				     *
 *===========================================================================*/
static int mini_senda(struct proc *caller_ptr, asynmsg_t *table, size_t size)
{
  struct priv *privp;

  privp = priv(caller_ptr);
  if (!(privp->s_flags & SYS_PROC)) {
	printf("mini_senda: warning: caller has no privilege structure\n");
	return(EPERM);
  }

  return try_deliver_senda(caller_ptr, table, size);
}

/*===========================================================================*
 *				try_async				     *
 *===========================================================================*/
static int try_async(struct proc *caller_ptr)
{
  int r;
  struct priv *privp;
  struct proc *src_ptr;
  sys_map_t *map;

  map = &priv(caller_ptr)->s_asyn_pending;

  /* Try all privilege structures */
  for (privp = BEG_PRIV_ADDR; privp < END_PRIV_ADDR; ++privp) {
	if (privp->s_proc_nr == NONE)
		continue;

	if (!get_sys_bit(*map, privp->s_id))
		continue;

	src_ptr = proc_addr(privp->s_proc_nr);

#ifdef CONFIG_SMP
	/*
	 * Do not copy from a process which does not have a stable address
	 * space due to VM fiddling with it
	 */
	if (RTS_ISSET(src_ptr, RTS_VMINHIBIT)) {
		src_ptr->p_misc_flags |= MF_SENDA_VM_MISS;
		continue;
	}
#endif

	assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
	if ((r = try_one(src_ptr, caller_ptr)) == OK)
		return(r);
  }

  return(ESRCH);
}

/*===========================================================================*
 *				try_one					     *
 *===========================================================================*/
static int try_one(struct proc *src_ptr, struct proc *dst_ptr)
{
/* Try to receive an asynchronous message from 'src_ptr' */
  int r = EAGAIN, done, do_notify;
  unsigned int flags, i;
  size_t size;
  endpoint_t dst;
  struct proc *caller_ptr;
  struct priv *privp;
  asynmsg_t tabent;
  vir_bytes table_v;

  privp = priv(src_ptr);
  if (!(privp->s_flags & SYS_PROC)) return(EPERM);
  size = privp->s_asynsize;
  table_v = privp->s_asyntab;

  /* Clear the table's pending-message flag. We're done unless we're not. */
  unset_sys_bit(priv(dst_ptr)->s_asyn_pending, privp->s_id);

  if (size == 0) return(EAGAIN);
  if (!may_send_to(src_ptr, proc_nr(dst_ptr))) return(ECALLDENIED);

  caller_ptr = src_ptr;	/* Needed for A_ macros later on */

  /* Scan the table */
  do_notify = FALSE;
  done = TRUE;

  for (i = 0; i < size; i++) {
	/* Process each entry in the table and store the result in the table.
	 * If we're done handling a message, copy the result to the sender.
	 * Some checks done in mini_senda are duplicated here, as the sender
	 * could've altered the contents of the table in the meantime.
	 */

	/* Copy message to kernel */
	A_RETR(i);
	flags = tabent.flags;
	dst = tabent.dst;

	if (flags == 0) continue;	/* Skip empty entries */

	/* 'flags' field must contain only valid bits */
	if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY|AMF_NOTIFY_ERR))
		r = EINVAL;
	else if (!(flags & AMF_VALID))	/* Must contain message */
		r = EINVAL;
	else if (flags & AMF_DONE) continue;	/* Already done processing */

	/* Clear the done flag. The sender is done sending when all messages
	 * in the table are marked done or empty. However, we will know that
	 * only the next time we enter this function, or when the sender
	 * decides to send additional asynchronous messages and manages to
	 * deliver them all.
	 */
	done = FALSE;

	if (r == EINVAL)
		goto store_result;

	/* Message must be directed at receiving end */
	if (dst != dst_ptr->p_endpoint) continue;

	/* If AMF_NOREPLY is set, then this message is not a reply to a
	 * SENDREC and thus should not satisfy the receiving part of the
	 * SENDREC. This message is to be delivered later.
	 */
	if ((flags & AMF_NOREPLY) && (dst_ptr->p_misc_flags & MF_REPLY_PEND))
		continue;

	/* Destination is ready to receive the message; deliver it */
	r = OK;
	dst_ptr->p_delivermsg = tabent.msg;
	dst_ptr->p_delivermsg.m_source = src_ptr->p_endpoint;
	dst_ptr->p_misc_flags |= MF_DELIVERMSG;
#if DEBUG_IPC_HOOK
	hook_ipc_msgrecv(&dst_ptr->p_delivermsg, src_ptr, dst_ptr);
#endif

store_result:
	/* Store results for sender */
	tabent.result = r;
	tabent.flags = flags | AMF_DONE;
	if (flags & AMF_NOTIFY) do_notify = TRUE;
	else if (r != OK && (flags & AMF_NOTIFY_ERR)) do_notify = TRUE;
	A_INSRT(i);	/* Copy results to sender */

	break;
  }

  if (do_notify)
	mini_notify(proc_addr(ASYNCM), src_ptr->p_endpoint);

  if (done) {
	privp->s_asyntab = -1;
	privp->s_asynsize = 0;
  } else {
	set_sys_bit(priv(dst_ptr)->s_asyn_pending, privp->s_id);
  }

asyn_error:
  return(r);
}
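
/* Note (added for clarity): the unset_sys_bit()/set_sys_bit() pair above
 * implements a retry handshake. A sketch of the net effect, assuming the
 * sender's table is not modified concurrently:
 *
 *	unset_sys_bit(map, id);		// tentatively claim "all handled"
 *	...scan the table; deliver at most one message (note the break)...
 *	if (!done)
 *		set_sys_bit(map, id);	// entries remain: re-arm the bit so
 *					// a later receive retries this table
 */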

/*===========================================================================*
 *				cancel_async				     *
 *===========================================================================*/
int cancel_async(struct proc *src_ptr, struct proc *dst_ptr)
{
/* Cancel asynchronous messages from src to dst, because dst is not interested
 * in them (e.g., dst has been restarted) */
  int done, do_notify;
  unsigned int flags, i;
  size_t size;
  endpoint_t dst;
  struct proc *caller_ptr;
  struct priv *privp;
  asynmsg_t tabent;
  vir_bytes table_v;

  privp = priv(src_ptr);
  if (!(privp->s_flags & SYS_PROC)) return(EPERM);
  size = privp->s_asynsize;
  table_v = privp->s_asyntab;

  /* Clear the table's pending-message flag. We're done unless we're not. */
  privp->s_asyntab = -1;
  privp->s_asynsize = 0;
  unset_sys_bit(priv(dst_ptr)->s_asyn_pending, privp->s_id);

  if (size == 0) return(EAGAIN);
  if (!may_send_to(src_ptr, proc_nr(dst_ptr))) return(ECALLDENIED);

  caller_ptr = src_ptr;	/* Needed for A_ macros later on */

  /* Scan the table */
  do_notify = FALSE;
  done = TRUE;

  for (i = 0; i < size; i++) {
	/* Process each entry in the table and store the result in the table.
	 * If we're done handling a message, copy the result to the sender.
	 * Some checks done in mini_senda are duplicated here, as the sender
	 * could've altered the contents of the table in the meantime.
	 */

	int r = EDEADSRCDST;	/* Cancel delivery due to dead dst */

	/* Copy message to kernel */
	A_RETR(i);
	flags = tabent.flags;
	dst = tabent.dst;

	if (flags == 0) continue;	/* Skip empty entries */

	/* 'flags' field must contain only valid bits */
	if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY|AMF_NOTIFY_ERR))
		r = EINVAL;
	else if (!(flags & AMF_VALID))	/* Must contain message */
		r = EINVAL;
	else if (flags & AMF_DONE) continue;	/* Already done processing */

	/* Message must be directed at receiving end */
	if (dst != dst_ptr->p_endpoint) {
		done = FALSE;
		continue;
	}

	/* Store results for sender */
	tabent.result = r;
	tabent.flags = flags | AMF_DONE;
	if (flags & AMF_NOTIFY) do_notify = TRUE;
	else if (r != OK && (flags & AMF_NOTIFY_ERR)) do_notify = TRUE;
	A_INSRT(i);	/* Copy results to sender */
  }

  if (do_notify)
	mini_notify(proc_addr(ASYNCM), src_ptr->p_endpoint);

  if (!done) {
	privp->s_asyntab = table_v;
	privp->s_asynsize = size;
  }

asyn_error:
  return(OK);
}
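
/* Worked example (illustrative): if a sender queued an entry with flags
 * AMF_VALID|AMF_NOTIFY_ERR for an endpoint that is then restarted, the loop
 * above rewrites that entry as
 *
 *	tabent.result = EDEADSRCDST;
 *	tabent.flags  = AMF_VALID|AMF_NOTIFY_ERR|AMF_DONE;
 *
 * and, because the result is an error and AMF_NOTIFY_ERR is set, the sender
 * also receives a notification from ASYNCM.
 */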

/*===========================================================================*
 *				enqueue					     *
 *===========================================================================*/
void enqueue(
  register struct proc *rp	/* this process is now runnable */
)
{
/* Add 'rp' to one of the queues of runnable processes. This function is
 * responsible for inserting a process into one of the scheduling queues.
 * The mechanism is implemented here. The actual scheduling policy is
 * defined in sched() and pick_proc().
 *
 * This function can be used x-cpu as it always uses the queues of the cpu the
 * process is assigned to.
 */
  int q = rp->p_priority;			/* scheduling queue to use */
  struct proc **rdy_head, **rdy_tail;

  assert(proc_is_runnable(rp));
  assert(q >= 0);

  rdy_head = get_cpu_var(rp->p_cpu, run_q_head);
  rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);

  /* Now add the process to the queue. */
  if (!rdy_head[q]) {				/* add to empty queue */
	rdy_head[q] = rdy_tail[q] = rp;		/* create a new queue */
	rp->p_nextready = NULL;			/* mark new end */
  }
  else {					/* add to tail of queue */
	rdy_tail[q]->p_nextready = rp;		/* chain tail of queue */
	rdy_tail[q] = rp;			/* set new queue tail */
	rp->p_nextready = NULL;			/* mark new end */
  }

  if (cpuid == rp->p_cpu) {
	/*
	 * If we are enqueueing a process with a higher priority than the
	 * current one, the current one gets preempted. The current process
	 * must be preemptible. Testing the priority also makes sure that a
	 * process does not preempt itself.
	 */
	struct proc * p;
	p = get_cpulocal_var(proc_ptr);
	assert(p);
	if ((p->p_priority > rp->p_priority) &&
			(priv(p)->s_flags & PREEMPTIBLE))
		RTS_SET(p, RTS_PREEMPTED);	/* calls dequeue() */
  }
#ifdef CONFIG_SMP
  /*
   * If the process was enqueued on a different cpu and that cpu is idle, i.e.
   * its timer is off, we need to wake up that cpu and let it schedule this
   * new process.
   */
  else if (get_cpu_var(rp->p_cpu, cpu_is_idle)) {
	smp_schedule(rp->p_cpu);
  }
#endif

  /* Make note of when this process was added to queue */
  read_tsc_64(&(get_cpulocal_var(proc_ptr)->p_accounting.enter_queue));

#if DEBUG_SANITYCHECKS
  assert(runqueues_ok_local());
#endif
}
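
/* Worked example (illustrative), under MINIX's convention that a numerically
 * smaller p_priority means a higher priority: a PREEMPTIBLE process running
 * at p_priority 7 is preempted as soon as a process with p_priority 3 is
 * enqueued on the same cpu, since 7 > 3 holds above. RTS_SET(p,
 * RTS_PREEMPTED) then dequeues the running process, and enqueue_head() below
 * later returns it to the front of queue 7 with its remaining quantum intact.
 */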

/*===========================================================================*
 *				enqueue_head				     *
 *===========================================================================*/
/*
 * Put a process at the front of its run queue. This comes in handy when a
 * process is preempted and removed from its run queue, so that a currently
 * not-runnable process is never left on a run queue. To be fair, the process
 * is put back at the front rather than at the tail.
 */
static void enqueue_head(struct proc *rp)
{
  const int q = rp->p_priority;			/* scheduling queue to use */

  struct proc **rdy_head, **rdy_tail;

  assert(proc_ptr_ok(rp));
  assert(proc_is_runnable(rp));

  /*
   * The process was runnable and its quantum had not expired when it was
   * dequeued. A process with no time left should have been handled elsewhere,
   * and differently.
   */
  assert(rp->p_cpu_time_left);

  assert(q >= 0);

  rdy_head = get_cpu_var(rp->p_cpu, run_q_head);
  rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);

  /* Now add the process to the queue. */
  if (!rdy_head[q]) {				/* add to empty queue */
	rdy_head[q] = rdy_tail[q] = rp;		/* create a new queue */
	rp->p_nextready = NULL;			/* mark new end */
  } else {					/* add to head of queue */
	rp->p_nextready = rdy_head[q];		/* chain head of queue */
	rdy_head[q] = rp;			/* set new queue head */
  }

  /* Make note of when this process was added to queue */
  read_tsc_64(&(get_cpulocal_var(proc_ptr)->p_accounting.enter_queue));

  /* Process accounting for scheduling */
  rp->p_accounting.dequeues--;
  rp->p_accounting.preempted++;

#if DEBUG_SANITYCHECKS
  assert(runqueues_ok_local());
#endif
}

/*===========================================================================*
 *				dequeue					     *
 *===========================================================================*/
void dequeue(struct proc *rp)
/* this process is no longer runnable */
{
/* A process must be removed from the scheduling queues, for example, because
 * it has blocked. If the currently active process is removed, a new process
 * is picked to run by calling pick_proc().
 *
 * This function can operate x-cpu as it always removes the process from the
 * queue of the cpu the process is currently assigned to.
 */
  int q = rp->p_priority;		/* queue to use */
  struct proc **xpp;			/* iterate over queue */
  struct proc *prev_xp;
  u64_t tsc, tsc_delta;

  struct proc **rdy_tail;

  assert(proc_ptr_ok(rp));
  assert(!proc_is_runnable(rp));

  /* Side-effect for the kernel: check whether the task's stack is still ok. */
  assert(!iskernelp(rp) || *priv(rp)->s_stack_guard == STACK_GUARD);

  rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);
2005-04-21 16:53:53 +02:00
|
|
|
/* Now make sure that the process is not in its ready queue. Remove the
|
2005-05-20 11:37:43 +02:00
|
|
|
* process if it is found. A process can be made unready even if it is not
|
|
|
|
* running by being sent a signal that kills it.
|
2005-04-21 16:53:53 +02:00
|
|
|
*/
|
2010-03-28 11:54:32 +02:00
|
|
|
prev_xp = NULL;
|
2010-09-15 16:10:18 +02:00
|
|
|
for (xpp = get_cpu_var_ptr(rp->p_cpu, run_q_head[q]); *xpp;
|
|
|
|
xpp = &(*xpp)->p_nextready) {
|
2005-06-20 13:26:48 +02:00
|
|
|
if (*xpp == rp) { /* found process to remove */
|
|
|
|
*xpp = (*xpp)->p_nextready; /* replace with next chain */
|
2010-03-10 14:00:05 +01:00
|
|
|
if (rp == rdy_tail[q]) { /* queue tail removed */
|
2005-06-20 13:26:48 +02:00
|
|
|
rdy_tail[q] = prev_xp; /* set new tail */
|
2010-03-10 14:00:05 +01:00
|
|
|
}
|
Primary goal for these changes is:
- no longer have kernel have its own page table that is loaded
on every kernel entry (trap, interrupt, exception). the primary
purpose is to reduce the number of required reloads.
Result:
- kernel can only access memory of process that was running when
kernel was entered
- kernel must be mapped into every process page table, so traps to
kernel keep working
Problem:
- kernel must often access memory of arbitrary processes (e.g. send
arbitrary processes messages); this can't happen directly any more;
usually because that process' page table isn't loaded at all, sometimes
because that memory isn't mapped in at all, sometimes because it isn't
mapped in read-write.
So:
- kernel must be able to map in memory of any process, in its own
address space.
Implementation:
- VM and kernel share a range of memory in which addresses of
all page tables of all processes are available. This has two purposes:
. Kernel has to know what data to copy in order to map in a range
. Kernel has to know where to write the data in order to map it in
That last point is because kernel has to write in the currently loaded
page table.
- Processes and kernel are separated through segments; kernel segments
haven't changed.
- The kernel keeps the process whose page table is currently loaded
in 'ptproc.'
- If it wants to map in a range of memory, it writes the value of the
page directory entry for that range into the page directory entry
in the currently loaded map. There is a slot reserved for such
purposes. The kernel can then access this memory directly.
- In order to do this, its segment has been increased (and the
segments of processes start where it ends).
- In the pagefault handler, detect if the kernel is doing
'trappable' memory access (i.e. a pagefault isn't a fatal
error) and if so,
- set the saved instruction pointer to phys_copy_fault,
breaking out of phys_copy
- set the saved eax register to the address of the page
fault, both for sanity checking and for checking in
which of the two ranges that phys_copy was called
with the fault occurred
- Some boot-time processes do not have their own page table,
and are mapped in with the kernel, and separated with
segments. The kernel detects this using HASPT. If such a
process has to be scheduled, any page table will work and
no page table switch is done.
Major changes in kernel are
- When accessing user processes memory, kernel no longer
explicitly checks before it does so if that memory is OK.
It simply makes the mapping (if necessary), tries to do the
operation, and traps the pagefault if that memory isn't present;
if that happens, the copy function returns EFAULT.
So all of the CHECKRANGE_OR_SUSPEND macros are gone.
- Kernel no longer has to copy/read and parse page tables.
- A message copying optimisation: when messages are copied, and
the recipient isn't mapped in, they are copied into a buffer
in the kernel. This is done in QueueMess. The next time
the recipient is scheduled, this message is copied into
its memory. This happens in schedcheck().
This eliminates the mapping/copying step for messages, and makes
it easier to deliver messages. This eliminates soft_notify.
- Kernel no longer creates a page table at all, so the vm_setbuf
and pagetable writing in memory.c is gone.
Minor changes in kernel are
- ipc_stats thrown out, wasn't used
- misc flags all renamed to MF_*
- NOREC_* macros to enter and leave functions that should not
be called recursively; just sanity checks really
- code to fully decode segment selectors and descriptors
to print on exceptions
- lots of vmassert()s added, only executed if DEBUG_VMASSERT is 1
2009-09-21 16:31:52 +02:00
|
|
|
|
2005-05-26 15:17:57 +02:00
|
|
|
break;
|
|
|
|
}
|
2005-06-20 13:26:48 +02:00
|
|
|
prev_xp = *xpp; /* save previous in chain */
|
2005-04-21 16:53:53 +02:00
|
|
|
}
|
2005-11-14 16:50:46 +01:00
|
|
|
|
2010-09-19 17:52:12 +02:00
|
|
|
|
|
|
|
/* Process accounting for scheduling */
|
|
|
|
rp->p_accounting.dequeues++;
|
|
|
|
|
|
|
|
/* this is not all that accurate on virtual machines, especially with
|
|
|
|
I/O-bound processes that only spend a short amount of time in the queue
|
|
|
|
at a time. */
|
2013-08-07 12:17:09 +02:00
|
|
|
if (rp->p_accounting.enter_queue) {
|
2010-09-19 17:52:12 +02:00
|
|
|
read_tsc_64(&tsc);
|
2013-08-07 12:17:09 +02:00
|
|
|
tsc_delta = tsc - rp->p_accounting.enter_queue;
|
|
|
|
rp->p_accounting.time_in_queue = rp->p_accounting.time_in_queue +
|
|
|
|
tsc_delta;
|
|
|
|
rp->p_accounting.enter_queue = 0;
|
2010-09-19 17:52:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-03-10 14:00:05 +01:00
|
|
|
#if DEBUG_SANITYCHECKS
|
2010-09-15 16:10:18 +02:00
|
|
|
assert(runqueues_ok_local());
|
2005-07-14 17:12:12 +02:00
|
|
|
#endif
|
2005-04-21 16:53:53 +02:00
|
|
|
}
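/* For illustration only (node_t, queue and victim are hypothetical, not
 * kernel names): the pointer-pointer idiom used by dequeue() above removes
 * a node with no special case for the head of the list.
 *
 *   node_t **xpp;
 *   for (xpp = &queue; *xpp != NULL; xpp = &(*xpp)->next) {
 *       if (*xpp == victim) {                // found node to remove
 *           *xpp = victim->next;             // unlink head and interior alike
 *           victim->next = NULL;             // detach the removed node
 *           break;
 *       }
 *   }
 *
 * Note that dequeue() additionally maintains the queue's tail pointer, which
 * this minimal sketch omits.
 */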
|
|
|
|
|
2005-06-30 17:55:19 +02:00
|
|
|
/*===========================================================================*
|
|
|
|
* pick_proc *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
static struct proc * pick_proc(void)
|
2005-06-30 17:55:19 +02:00
|
|
|
{
|
2009-11-09 18:48:31 +01:00
|
|
|
/* Decide who to run now. A new process is selected and returned.
|
2005-06-30 17:55:19 +02:00
|
|
|
* When a billable process is selected, record it in 'bill_ptr', so that the
|
|
|
|
* clock task can tell who to bill for system time.
|
2010-09-15 16:10:18 +02:00
|
|
|
*
|
2011-02-25 17:46:30 +01:00
|
|
|
* This function always uses the run queues of the local cpu!
|
2005-06-30 17:55:19 +02:00
|
|
|
*/
|
|
|
|
register struct proc *rp; /* process to run */
|
2010-09-15 16:10:18 +02:00
|
|
|
struct proc **rdy_head;
|
2009-09-21 16:31:52 +02:00
|
|
|
int q; /* iterate over queues */
|
|
|
|
|
2005-06-30 17:55:19 +02:00
|
|
|
/* Check each of the scheduling queues for ready processes. The number of
|
|
|
|
* queues is defined in proc.h, and priorities are set in the task table.
|
2011-02-25 17:46:30 +01:00
|
|
|
* If there are no processes ready to run, return NULL.
|
2005-06-30 17:55:19 +02:00
|
|
|
*/
|
2010-09-15 16:10:18 +02:00
|
|
|
rdy_head = get_cpulocal_var(run_q_head);
|
2005-06-30 17:55:19 +02:00
|
|
|
for (q=0; q < NR_SCHED_QUEUES; q++) {
|
2009-09-21 16:31:52 +02:00
|
|
|
if(!(rp = rdy_head[q])) {
|
2010-09-15 16:11:01 +02:00
|
|
|
TRACE(VF_PICKPROC, printf("cpu %d queue %d empty\n", cpuid, q););
|
2009-09-21 16:31:52 +02:00
|
|
|
continue;
|
|
|
|
}
|
2010-03-10 14:00:05 +01:00
|
|
|
assert(proc_is_runnable(rp));
|
2009-09-21 16:31:52 +02:00
|
|
|
if (priv(rp)->s_flags & BILLABLE)
|
2010-09-15 16:09:46 +02:00
|
|
|
get_cpulocal_var(bill_ptr) = rp; /* bill for system time */
|
2009-11-09 18:48:31 +01:00
|
|
|
return rp;
|
2005-06-30 17:55:19 +02:00
|
|
|
}
|
2009-11-09 18:48:31 +01:00
|
|
|
return NULL;
|
2005-04-21 16:53:53 +02:00
|
|
|
}
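/* Distilled sketch of the policy above (illustrative, with the assertions,
 * tracing and billing stripped; not a replacement for the kernel code):
 *
 *   for (q = 0; q < NR_SCHED_QUEUES; q++)    // queue 0 is highest priority
 *       if (rdy_head[q] != NULL)
 *           return rdy_head[q];              // head of best nonempty queue
 *   return NULL;                             // no process is ready
 *
 * Because the scan is strictly lowest-q-first, a ready process in a higher
 * priority queue is always chosen before any lower queue is considered.
 */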
|
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
/*===========================================================================*
|
|
|
|
* endpoint_lookup *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
struct proc *endpoint_lookup(endpoint_t e)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
|
|
|
if(!isokendpt(e, &n)) return NULL;
|
|
|
|
|
|
|
|
return proc_addr(n);
|
|
|
|
}
|
|
|
|
|
'proc number' is process slot, 'endpoints' are generation-aware process
instance numbers, encoded and decoded using macros in <minix/endpoint.h>.
proc number -> endpoint migration
. proc_nr in the interrupt hook is now an endpoint, proc_nr_e.
. m_source for messages and notifies is now an endpoint, instead of
proc number.
. isokendpt() converts an endpoint to a process number, returns
success (but fails if the process number is out of range, the
process slot is not a living process, or the given endpoint
number does not match the endpoint number in the process slot,
indicating an old process).
. okendpt() is the same as isokendpt(), but panic()s if the conversion
fails. This is mainly used for decoding message.m_source endpoints,
and other endpoint numbers in kernel data structures, which should
always be correct.
. if DEBUG_ENABLE_IPC_WARNINGS is enabled, isokendpt() and okendpt()
get passed the __FILE__ and __LINE__ of the calling lines, and
print messages about what is wrong with the endpoint number
(out of range proc, empty proc, or inconsistent endpoint number),
with the caller, making finding where the conversion failed easy
without having to include code for every call to print where things
went wrong. Sometimes this is harmless (wrong arg to a kernel call),
sometimes it's a fatal internal inconsistency (bogus m_source).
. some process table fields have had an _e appended to indicate they have
become endpoints.
. process endpoint is stored in p_endpoint, without generation number.
it turns out the kernel never needs the generation number, except
when fork()ing, so it's decoded then.
. kernel calls all take endpoints as arguments, not proc numbers.
the one exception is sys_fork(), which needs to know in which slot
to put the child.
2006-03-03 11:00:02 +01:00
|
|
|
/*===========================================================================*
|
|
|
|
* isokendpt_f *
|
|
|
|
*===========================================================================*/
|
|
|
|
#if DEBUG_ENABLE_IPC_WARNINGS
|
2012-03-25 20:25:53 +02:00
|
|
|
int isokendpt_f(file, line, e, p, fatalflag)
|
2010-03-27 15:31:00 +01:00
|
|
|
const char *file;
|
2006-03-03 11:00:02 +01:00
|
|
|
int line;
|
|
|
|
#else
|
2012-03-25 20:25:53 +02:00
|
|
|
int isokendpt_f(e, p, fatalflag)
|
2006-03-03 11:00:02 +01:00
|
|
|
#endif
|
2006-06-20 11:57:00 +02:00
|
|
|
endpoint_t e;
|
2010-03-27 15:31:00 +01:00
|
|
|
int *p;
|
|
|
|
const int fatalflag;
|
2006-03-03 11:00:02 +01:00
|
|
|
{
|
|
|
|
int ok = 0;
|
|
|
|
/* Convert an endpoint number into a process number.
|
|
|
|
* Return nonzero if the process is alive with the corresponding
|
|
|
|
* generation number, zero otherwise.
|
|
|
|
*
|
|
|
|
* This function is called with file and line number by the
|
|
|
|
* isokendpt_d macro if DEBUG_ENABLE_IPC_WARNINGS is defined,
|
|
|
|
* otherwise without. This allows us to print where the
|
|
|
|
* conversion was attempted, making the errors verbose without
|
|
|
|
* adding code for that at every call.
|
|
|
|
*
|
|
|
|
* If fatalflag is nonzero, we must panic if the conversion doesn't
|
|
|
|
* succeed.
|
|
|
|
*/
|
|
|
|
*p = _ENDPOINT_P(e);
|
2012-03-28 18:23:12 +02:00
|
|
|
ok = 0;
|
|
|
|
if(isokprocn(*p) && !isemptyn(*p) && proc_addr(*p)->p_endpoint == e)
|
|
|
|
ok = 1;
|
|
|
|
if(!ok && fatalflag)
|
2010-03-05 16:05:11 +01:00
|
|
|
panic("invalid endpoint: %d", e);
|
2006-03-03 11:00:02 +01:00
|
|
|
return ok;
|
|
|
|
}
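/* Illustrative sketch only: one possible generation-aware encoding, NOT the
 * actual <minix/endpoint.h> macros. Packing a generation count next to the
 * slot number is what lets the p_endpoint comparison above reject a stale
 * endpoint after its process slot has been reused.
 *
 *   #define GEN_SHIFT 12                            // hypothetical layout
 *   #define SLOT_MASK ((1 << GEN_SHIFT) - 1)
 *   #define ENC_ENDPT(gen, slot) (((gen) << GEN_SHIFT) | ((slot) & SLOT_MASK))
 *   #define ENDPT_SLOT(e)        ((e) & SLOT_MASK)  // cf. _ENDPOINT_P(e)
 *   #define ENDPT_GEN(e)         ((e) >> GEN_SHIFT)
 */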
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2012-03-25 20:25:53 +02:00
|
|
|
static void notify_scheduler(struct proc *p)
|
2010-03-29 13:07:20 +02:00
|
|
|
{
|
2010-05-25 10:06:14 +02:00
|
|
|
message m_no_quantum;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
assert(!proc_kernel_scheduler(p));
|
|
|
|
|
2010-03-29 13:07:20 +02:00
|
|
|
/* dequeue the process */
|
|
|
|
RTS_SET(p, RTS_NO_QUANTUM);
|
|
|
|
/*
|
|
|
|
* Notify the process's scheduler that it has run out of
|
|
|
|
* quantum. This is done by sending a message to the scheduler
|
|
|
|
* on the process's behalf.
|
|
|
|
*/
|
2010-05-25 10:06:14 +02:00
|
|
|
m_no_quantum.m_source = p->p_endpoint;
|
|
|
|
m_no_quantum.m_type = SCHEDULING_NO_QUANTUM;
|
2010-09-19 17:52:12 +02:00
|
|
|
m_no_quantum.SCHEDULING_ACNT_QUEUE = cpu_time_2_ms(p->p_accounting.time_in_queue);
|
|
|
|
m_no_quantum.SCHEDULING_ACNT_DEQS = p->p_accounting.dequeues;
|
|
|
|
m_no_quantum.SCHEDULING_ACNT_IPC_SYNC = p->p_accounting.ipc_sync;
|
|
|
|
m_no_quantum.SCHEDULING_ACNT_IPC_ASYNC = p->p_accounting.ipc_async;
|
|
|
|
m_no_quantum.SCHEDULING_ACNT_PREEMPT = p->p_accounting.preempted;
|
|
|
|
m_no_quantum.SCHEDULING_ACNT_CPU = cpuid;
|
|
|
|
m_no_quantum.SCHEDULING_ACNT_CPU_LOAD = cpu_load();
|
|
|
|
|
|
|
|
/* Reset accounting */
|
|
|
|
reset_proc_accounting(p);
|
2010-03-29 13:07:20 +02:00
|
|
|
|
2010-05-25 10:06:14 +02:00
|
|
|
if ((err = mini_send(p, p->p_scheduler->p_endpoint,
|
|
|
|
&m_no_quantum, FROM_KERNEL))) {
|
|
|
|
panic("WARNING: Scheduling: mini_send returned %d\n", err);
|
2010-03-29 13:07:20 +02:00
|
|
|
}
|
|
|
|
}
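/* Illustrative sketch only: roughly how a user-space scheduler might handle
 * the SCHEDULING_NO_QUANTUM message built above. The dispatch style and the
 * apply_policy() helper are assumptions, not the actual PM/SCHED code.
 *
 *   case SCHEDULING_NO_QUANTUM:
 *       ep = m.m_source;                     // sent on the process' behalf
 *       apply_policy(ep, &priority, &quantum);   // e.g. bump down a queue
 *       // ...then make the process runnable again via the sys_schedule()
 *       // kernel call with the chosen priority and quantum.
 *       break;
 */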
|
|
|
|
|
2012-03-25 20:25:53 +02:00
|
|
|
void proc_no_time(struct proc * p)
|
2010-03-29 13:07:20 +02:00
|
|
|
{
|
2010-05-25 10:06:14 +02:00
|
|
|
if (!proc_kernel_scheduler(p) && priv(p)->s_flags & PREEMPTIBLE) {
|
|
|
|
/* this dequeues the process */
|
|
|
|
notify_scheduler(p);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* non-preemptible processes only need their quantum to
|
|
|
|
* be renewed. In fact, they bypass scheduling.
|
|
|
|
*/
|
|
|
|
p->p_cpu_time_left = ms_2_cpu_time(p->p_quantum_size_ms);
|
2010-05-08 20:00:03 +02:00
|
|
|
#if DEBUG_RACE
|
2011-11-07 19:59:02 +01:00
|
|
|
RTS_SET(p, RTS_PREEMPTED);
|
|
|
|
RTS_UNSET(p, RTS_PREEMPTED);
|
2010-05-08 20:00:03 +02:00
|
|
|
#endif
|
2010-03-29 13:07:20 +02:00
|
|
|
}
|
|
|
|
}
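/* Illustrative only: ms_2_cpu_time() converts a quantum in milliseconds to
 * CPU time units. A plausible definition (an assumption, not necessarily the
 * kernel's) scales by the timestamp counter frequency:
 *
 *   cycles = ms * (tsc_frequency / 1000);    // TSC ticks per millisecond
 */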
|
2010-09-19 17:52:12 +02:00
|
|
|
|
2012-03-25 20:25:53 +02:00
|
|
|
void reset_proc_accounting(struct proc *p)
|
2010-09-19 17:52:12 +02:00
|
|
|
{
|
|
|
|
p->p_accounting.preempted = 0;
|
|
|
|
p->p_accounting.ipc_sync = 0;
|
|
|
|
p->p_accounting.ipc_async = 0;
|
|
|
|
p->p_accounting.dequeues = 0;
|
2013-08-07 12:17:09 +02:00
|
|
|
p->p_accounting.time_in_queue = 0;
|
|
|
|
p->p_accounting.enter_queue = 0;
|
2010-09-19 17:52:12 +02:00
|
|
|
}
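/* Sketch of the accounting life cycle these fields support, assuming (as
 * dequeue() above implies) that enqueueing stamps enter_queue with the TSC:
 *
 *   enqueue:         read_tsc_64(&p->p_accounting.enter_queue);
 *   dequeue:         time_in_queue += tsc_now - enter_queue; enter_queue = 0;
 *   out of quantum:  report cpu_time_2_ms(time_in_queue) to the scheduler,
 *                    then reset_proc_accounting() starts a fresh period.
 */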
|
2010-06-07 09:43:17 +02:00
|
|
|
|
2012-03-25 20:25:53 +02:00
|
|
|
void copr_not_available_handler(void)
|
2010-06-07 09:43:17 +02:00
|
|
|
{
|
2010-09-15 16:09:46 +02:00
|
|
|
struct proc * p;
|
2010-09-15 16:11:25 +02:00
|
|
|
struct proc ** local_fpu_owner;
|
2010-06-07 09:43:17 +02:00
|
|
|
/*
|
|
|
|
* Disable the FPU exception (both for the kernel and for the process
|
|
|
|
* once it's scheduled), and initialize or restore the FPU state.
|
|
|
|
*/
|
|
|
|
|
|
|
|
disable_fpu_exception();
|
|
|
|
|
2010-09-15 16:09:46 +02:00
|
|
|
p = get_cpulocal_var(proc_ptr);
|
|
|
|
|
2010-06-07 09:43:17 +02:00
|
|
|
/* if FPU is not owned by anyone, do not store anything */
|
2010-09-15 16:11:25 +02:00
|
|
|
local_fpu_owner = get_cpulocal_var_ptr(fpu_owner);
|
|
|
|
if (*local_fpu_owner != NULL) {
|
|
|
|
assert(*local_fpu_owner != p);
|
2012-03-03 20:33:02 +01:00
|
|
|
save_local_fpu(*local_fpu_owner, FALSE /*retain*/);
|
2010-06-07 09:43:17 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* restore the current process' state and let it run again, do not
|
|
|
|
* schedule!
|
|
|
|
*/
|
2012-03-03 19:25:57 +01:00
|
|
|
if (restore_fpu(p) != OK) {
|
|
|
|
/* Restoring FPU state failed. This is always the process's own
|
|
|
|
* fault. Send a signal, and schedule another process instead.
|
|
|
|
*/
|
2012-03-03 20:33:02 +01:00
|
|
|
*local_fpu_owner = NULL; /* release FPU */
|
2012-03-03 19:25:57 +01:00
|
|
|
cause_sig(proc_nr(p), SIGFPE);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-09-15 16:11:25 +02:00
|
|
|
*local_fpu_owner = p;
|
2010-06-07 09:43:17 +02:00
|
|
|
context_stop(proc_addr(KERNEL));
|
2010-09-15 16:09:46 +02:00
|
|
|
restore_user_context(p);
|
2010-06-07 09:43:17 +02:00
|
|
|
NOT_REACHABLE;
|
|
|
|
}
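/* Summary sketch of the lazy-FPU protocol above (hypothetical helper names,
 * not additional kernel code):
 *
 *   disable_fpu_exception();                 // stop re-trapping FPU use
 *   if (*owner != NULL && *owner != p)
 *       save_fpu_state(*owner);              // spill the previous owner
 *   if (restore_fpu_state(p) != OK) {        // bad state: process's fault
 *       *owner = NULL;
 *       cause_sig(proc_nr(p), SIGFPE);
 *   } else
 *       *owner = p;                          // p now owns the FPU
 */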
|
|
|
|
|
2012-03-25 20:25:53 +02:00
|
|
|
void release_fpu(struct proc * p) {
|
2010-09-15 16:11:25 +02:00
|
|
|
struct proc ** fpu_owner_ptr;
|
|
|
|
|
|
|
|
fpu_owner_ptr = get_cpu_var_ptr(p->p_cpu, fpu_owner);
|
|
|
|
|
|
|
|
if (*fpu_owner_ptr == p)
|
|
|
|
*fpu_owner_ptr = NULL;
|
2010-06-07 09:43:17 +02:00
|
|
|
}
|
2013-05-21 14:57:57 +02:00
|
|
|
|
|
|
|
void ser_dump_proc()
|
|
|
|
{
|
|
|
|
struct proc *pp;
|
|
|
|
|
|
|
|
for (pp= BEG_PROC_ADDR; pp < END_PROC_ADDR; pp++)
|
|
|
|
{
|
|
|
|
if (isemptyp(pp))
|
|
|
|
continue;
|
|
|
|
print_proc_recursive(pp);
|
|
|
|
}
|
|
|
|
}
|
2013-06-25 14:41:01 +02:00
|
|
|
|
|
|
|
void increase_proc_signals(struct proc *p)
|
|
|
|
{
|
|
|
|
p->p_signal_received++;
|
|
|
|
}
|