cb176df60f
UPDATING INFO: 20100317: /usr/src/etc/system.conf updated to ignore default kernel calls: copy it (or merge it) to /etc/system.conf. The hello driver (/dev/hello) added to the distribution: # cd /usr/src/commands/scripts && make clean install # cd /dev && MAKEDEV hello KERNEL CHANGES: - Generic signal handling support. The kernel no longer assumes PM as a signal manager for every process. The signal manager of a given process can now be specified in its privilege slot. When a signal has to be delivered, the kernel performs the lookup and forwards the signal to the appropriate signal manager. PM is the default signal manager for user processes, RS is the default signal manager for system processes. To enable ptrace()ing for system processes, it is sufficient to change the default signal manager to PM. This will temporarily disable crash recovery, though. - sys_exit() is now split into sys_exit() (i.e. exit() for system processes, which generates a self-termination signal), and sys_clear() (i.e. used by PM to ask the kernel to clear a process slot when a process exits). - Added a new kernel call (i.e. sys_update()) to swap two process slots and implement live update. PM CHANGES: - Posix signal handling is no longer allowed for system processes. System signals are split into two fixed categories: termination and non-termination signals. When a non-termination signal is processed, PM transforms the signal into an IPC message and delivers the message to the system process. When a termination signal is processed, PM terminates the process. - PM no longer assumes itself as the signal manager for system processes. It now makes sure that every system signal goes through the kernel before being actually processed. The kernel will then dispatch the signal to the appropriate signal manager which may or may not be PM. SYSLIB CHANGES: - Simplified SEF init and LU callbacks. - Added additional predefined SEF callbacks to debug crash recovery and live update. 
- Fixed a temporary hack in the SEF init protocol. SEF init reply is now completely synchronous. - Added SEF signal event type to provide a uniform interface for system processes to deal with signals. A sef_cb_signal_handler() callback is available for system processes to handle every received signal. A sef_cb_signal_manager() callback is used by signal managers to process system signals on behalf of the kernel. - Fixed a few bugs with memory mapping and DS. VM CHANGES: - Page faults and memory requests coming from the kernel are now implemented using signals. - Added a new VM call to swap two process slots and implement live update. - The call is used by RS at update time and in turn invokes the kernel call sys_update(). RS CHANGES: - RS has been reworked with a better functional decomposition. - Better kernel call masks. com.h now defines the set of very basic kernel calls every system service is allowed to use. This makes system.conf simpler and easier to maintain. In addition, this guarantees a higher level of isolation for system libraries that use one or more kernel calls internally (e.g. printf). - RS is the default signal manager for system processes. By default, RS intercepts every signal delivered to every system process. This makes crash recovery possible before bringing PM and friends in the loop. - RS now supports fast rollback when something goes wrong while initializing the new version during a live update. - Live update is now implemented by keeping the two versions side-by-side and swapping the process slots when the old version is ready to update. - Crash recovery is now implemented by keeping the two versions side-by-side and cleaning up the old version only when the recovery process is complete. DS CHANGES: - Fixed a bug when the process doing ds_publish() or ds_delete() is not known by DS. - Fixed the completely broken support for strings. String publishing is now implemented in the system library and simply wraps publishing of memory ranges. 
Ideally, we should adopt a similar approach for other data types as well. - Test suite fixed. DRIVER CHANGES: - The hello driver has been added to the Minix distribution to demonstrate basic live update and crash recovery functionalities. - Other drivers have been adapted to conform to the new SEF interface.
308 lines
8.5 KiB
C
308 lines
8.5 KiB
C
|
|
#define _SYSTEM 1
|
|
|
|
#include <minix/type.h>
|
|
#include <minix/config.h>
|
|
#include <minix/const.h>
|
|
#include <minix/sysutil.h>
|
|
#include <minix/syslib.h>
|
|
|
|
#include <limits.h>
|
|
#include <errno.h>
|
|
#include <assert.h>
|
|
#include <stdint.h>
|
|
#include <memory.h>
|
|
|
|
#include "vm.h"
|
|
#include "proto.h"
|
|
#include "util.h"
|
|
#include "glo.h"
|
|
#include "region.h"
|
|
#include "sanitycheck.h"
|
|
|
|
/*===========================================================================*
|
|
* split_phys *
|
|
*===========================================================================*/
|
|
PRIVATE int split_phys(struct phys_region *pr, vir_bytes point)
|
|
{
|
|
struct phys_region *newpr, *q, *prev;
|
|
struct phys_block *newpb;
|
|
struct phys_block *pb = pr->ph;
|
|
/* Split the phys region into 2 parts by @point. */
|
|
|
|
if(pr->offset >= point || pr->offset + pb->length <= point)
|
|
return OK;
|
|
if(!SLABALLOC(newpb))
|
|
return ENOMEM;
|
|
|
|
/* Split phys block. */
|
|
*newpb = *pb;
|
|
pb->length = point - pr->offset;
|
|
newpb->length -= pb->length;
|
|
newpb->phys += pb->length;
|
|
|
|
/* Split phys regions in a list. */
|
|
for(q = pb->firstregion; q; q = q->next_ph_list) {
|
|
if(!SLABALLOC(newpr))
|
|
return ENOMEM;
|
|
|
|
*newpr = *q;
|
|
newpr->ph = newpb;
|
|
newpr->offset += pb->length;
|
|
|
|
/* Link to the vir region's phys region list. */
|
|
physr_insert(newpr->parent->phys, newpr);
|
|
|
|
/* Link to the next_ph_list. */
|
|
if(q == pb->firstregion) {
|
|
newpb->firstregion = newpr;
|
|
prev = newpr;
|
|
} else {
|
|
prev->next_ph_list = newpr;
|
|
prev = newpr;
|
|
}
|
|
}
|
|
prev->next_ph_list = NULL;
|
|
|
|
return OK;
|
|
}
|
|
|
|
/*===========================================================================*
|
|
* rm_phys_regions *
|
|
*===========================================================================*/
|
|
PRIVATE void rm_phys_regions(struct vir_region *region,
|
|
vir_bytes begin, vir_bytes length)
|
|
{
|
|
/* Remove all phys regions between @begin and @begin+length.
|
|
*
|
|
* Don't update the page table, because we will update it at map_memory()
|
|
* later.
|
|
*/
|
|
struct phys_region *pr;
|
|
physr_iter iter;
|
|
|
|
physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
|
|
while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
|
|
pb_unreferenced(region, pr);
|
|
physr_remove(region->phys, pr->offset);
|
|
physr_start_iter(region->phys, &iter, begin,
|
|
AVL_GREATER_EQUAL);
|
|
SLABFREE(pr);
|
|
}
|
|
}
|
|
|
|
/*===========================================================================*
|
|
* clean_phys_regions *
|
|
*===========================================================================*/
|
|
PRIVATE void clean_phys_regions(struct vir_region *region,
|
|
vir_bytes offset, vir_bytes length)
|
|
{
|
|
/* Consider @offset as the start address and @offset+length as the end address.
|
|
* If there are phys regions crossing the start address or the end address,
|
|
* split them into 2 parts.
|
|
*
|
|
* We assume that the phys regions are listed in order and don't overlap.
|
|
*/
|
|
struct phys_region *pr;
|
|
physr_iter iter;
|
|
|
|
physr_start_iter_least(region->phys, &iter);
|
|
while((pr = physr_get_iter(&iter))) {
|
|
/* If this phys region crosses the start address, split it. */
|
|
if(pr->offset < offset
|
|
&& pr->offset + pr->ph->length > offset) {
|
|
split_phys(pr, offset);
|
|
physr_start_iter_least(region->phys, &iter);
|
|
}
|
|
/* If this phys region crosses the end address, split it. */
|
|
else if(pr->offset < offset + length
|
|
&& pr->offset + pr->ph->length > offset + length) {
|
|
split_phys(pr, offset + length);
|
|
physr_start_iter_least(region->phys, &iter);
|
|
}
|
|
else {
|
|
physr_incr_iter(&iter);
|
|
}
|
|
}
|
|
}
|
|
|
|
/*===========================================================================*
 *				do_map_memory				     *
 *===========================================================================*/
PRIVATE int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
	struct vir_region *vrs, struct vir_region *vrd,
	vir_bytes offset_s, vir_bytes offset_d,
	vir_bytes length, int flag)
{
/* Map @length bytes of the source region @vrs (process @vms), starting
 * at @offset_s, into the destination region @vrd (process @vmd) at
 * @offset_d.  Both offsets are relative to the start of their vir
 * region.  The caller is expected to have prepared the destination
 * range (see clean_phys_regions()/rm_phys_regions() in map_memory()).
 * Returns OK or ENOMEM.
 * NOTE(review): on ENOMEM part of the range may already have been
 * mapped; there is no rollback — confirm callers tolerate this.
 */
	struct phys_region *prs;
	struct phys_region *newphysr;
	struct phys_block *pb;
	physr_iter iter;
	u32_t pt_flag = PTF_PRESENT | PTF_USER;
	vir_bytes end;

	/* Search for the first phys region in the source process; it must
	 * start exactly at @offset_s (AVL_EQUAL).
	 */
	physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
	prs = physr_get_iter(&iter);
	if(!prs)
		panic("do_map_memory: no aligned phys region: %d", 0);

	/* flag:  0 -> read-only
	 *        1 -> writable
	 *       -1 -> share as COW, so read-only
	 */
	if(flag > 0)
		pt_flag |= PTF_WRITE;

	/* Map phys blocks in the source process to the destination process. */
	end = offset_d + length;
	while((prs = physr_get_iter(&iter)) && offset_d < end) {
		/* If a SMAP share was requested but the phys block has already
		 * been shared as COW, copy the block for the source phys region
		 * first.
		 */
		pb = prs->ph;
		if(flag >= 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_COW) {
			map_copy_ph_block(vms, vrs, prs);
			/* Re-fetch: the copy may have replaced prs->ph. */
			pb = prs->ph;
		}

		/* Allocate a new phys region for the destination. */
		if(!SLABALLOC(newphysr))
			return ENOMEM;

		/* Set and link the new phys region to the block:
		 * prepend to pb's region list, insert into vrd's tree,
		 * and take a reference on the block.
		 */
		newphysr->ph = pb;
		newphysr->offset = offset_d;
		newphysr->parent = vrd;
		newphysr->next_ph_list = pb->firstregion;
		pb->firstregion = newphysr;
		physr_insert(newphysr->parent->phys, newphysr);
		pb->refcount++;

		/* If a COW share was requested but the phys block has already
		 * been shared as SMAP, give up on COW and copy the block for
		 * the destination phys region now.
		 */
		if(flag < 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_SMAP) {
			map_copy_ph_block(vmd, vrd, newphysr);
		}
		else {
			/* See if this is a COW share or SMAP share. */
			if(flag < 0) { /* COW share */
				pb->share_flag = PBSH_COW;
				/* Update the page table for the src process:
				 * it must now see the block read-only too.
				 */
				pt_writemap(&vms->vm_pt, offset_s + vrs->vaddr,
					pb->phys, pb->length,
					pt_flag, WMF_OVERWRITE);
			}
			else { /* SMAP share */
				pb->share_flag = PBSH_SMAP;
			}
			/* Update the page table for the destination process. */
			pt_writemap(&vmd->vm_pt, offset_d + vrd->vaddr,
				pb->phys, pb->length, pt_flag, WMF_OVERWRITE);
		}

		/* Advance by the (possibly re-fetched) block's length. */
		physr_incr_iter(&iter);
		offset_d += pb->length;
		offset_s += pb->length;
	}
	return OK;
}
|
|
|
|
/*===========================================================================*
|
|
* map_memory *
|
|
*===========================================================================*/
|
|
PUBLIC int map_memory(endpoint_t sour, endpoint_t dest,
|
|
vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
|
|
{
|
|
/* This is the entry point. This function will be called by handle_memory() when
|
|
* VM recieves a map-memory request.
|
|
*/
|
|
struct vmproc *vms, *vmd;
|
|
struct vir_region *vrs, *vrd;
|
|
physr_iter iterd;
|
|
vir_bytes offset_s, offset_d;
|
|
int p;
|
|
int r;
|
|
|
|
if(vm_isokendpt(sour, &p) != OK)
|
|
panic("map_memory: bad endpoint: %d", sour);
|
|
vms = &vmproc[p];
|
|
if(vm_isokendpt(dest, &p) != OK)
|
|
panic("map_memory: bad endpoint: %d", dest);
|
|
vmd = &vmproc[p];
|
|
|
|
vrs = map_lookup(vms, virt_s);
|
|
vm_assert(vrs);
|
|
vrd = map_lookup(vmd, virt_d);
|
|
vm_assert(vrd);
|
|
|
|
/* Linear address -> offset from start of vir region. */
|
|
offset_s = virt_s - vrs->vaddr;
|
|
offset_d = virt_d - vrd->vaddr;
|
|
|
|
/* Make sure that the range in the source process has been mapped
|
|
* to physical memory.
|
|
*/
|
|
map_handle_memory(vms, vrs, offset_s, length, 0);
|
|
|
|
/* Prepare work. */
|
|
clean_phys_regions(vrs, offset_s, length);
|
|
clean_phys_regions(vrd, offset_d, length);
|
|
rm_phys_regions(vrd, offset_d, length);
|
|
|
|
/* Map memory. */
|
|
r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);
|
|
|
|
return r;
|
|
}
|
|
|
|
/*===========================================================================*
|
|
* unmap_memory *
|
|
*===========================================================================*/
|
|
PUBLIC int unmap_memory(endpoint_t sour, endpoint_t dest,
|
|
vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
|
|
{
|
|
struct vmproc *vmd;
|
|
struct vir_region *vrd;
|
|
struct phys_region *pr;
|
|
struct phys_block *pb;
|
|
physr_iter iter;
|
|
vir_bytes off, end;
|
|
int p;
|
|
|
|
/* Use information on the destination process to unmap. */
|
|
if(vm_isokendpt(dest, &p) != OK)
|
|
panic("unmap_memory: bad endpoint: %d", dest);
|
|
vmd = &vmproc[p];
|
|
|
|
vrd = map_lookup(vmd, virt_d);
|
|
vm_assert(vrd);
|
|
|
|
/* Search for the first phys region in the destination process. */
|
|
off = virt_d - vrd->vaddr;
|
|
physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
|
|
pr = physr_get_iter(&iter);
|
|
if(!pr)
|
|
panic("unmap_memory: no aligned phys region: %d", 0);
|
|
|
|
/* Copy the phys block now rather than doing COW. */
|
|
end = off + length;
|
|
while((pr = physr_get_iter(&iter)) && off < end) {
|
|
pb = pr->ph;
|
|
vm_assert(pb->refcount > 1);
|
|
vm_assert(pb->share_flag == PBSH_SMAP);
|
|
|
|
map_copy_ph_block(vmd, vrd, pr);
|
|
|
|
physr_incr_iter(&iter);
|
|
off += pb->length;
|
|
}
|
|
|
|
return OK;
|
|
}
|
|
|