/* minix/servers/vfs/dmap.c */

/* This file contains the table with device <-> driver mappings. It also
 * contains some routines to dynamically add and/or remove device drivers
 * or change mappings.
 */
#include "fs.h"
#include "fproc.h"
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <unistd.h>
#include <minix/com.h>
#include <minix/ds.h>
#include "param.h"
#define NC(x) (NR_CTRLRS >= (x))

/* The order of the entries in the table determines the mapping between major
 * device numbers and device drivers. Character and block devices
 * can be intermixed at random. The ordering determines the device numbers in
 * /dev. Note that the major device numbers used in /dev are NOT the same as
 * the process numbers of the device drivers. See <minix/dmap.h> for mappings.
 */
struct dmap dmap[NR_DEVICES];
#define DT_EMPTY { no_dev, no_dev_io, NONE, "", 0, STYLE_NDEV, NULL }
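
/* DT_EMPTY describes an unused slot: no open/close routine (no_dev), no I/O
 * routine (no_dev_io), no driver endpoint (NONE), an empty label, no flags,
 * style STYLE_NDEV, and a NULL final pointer member; see struct dmap in
 * <minix/dmap.h> for the exact field layout.
 */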

/*===========================================================================*
 *                               do_mapdriver                                *
 *===========================================================================*/
PUBLIC int do_mapdriver()
{
  int r, flags, major;
  endpoint_t endpoint;
  vir_bytes label_vir;
  size_t label_len;
  char label[LABEL_MAX];

  /* Only RS can map drivers. */
  if (who_e != RS_PROC_NR)
  {
    printf("vfs: unauthorized call of do_mapdriver by proc %d\n",
      who_e);
    return(EPERM);
  }

  /* Get the label. */
  label_vir= (vir_bytes)m_in.md_label;
  label_len= m_in.md_label_len;

  if (label_len+1 > sizeof(label))
  {
    printf("vfs:do_mapdriver: label too long\n");
    return EINVAL;
  }

  r= sys_vircopy(who_e, D, label_vir, SELF, D, (vir_bytes)label,
    label_len);
  if (r != OK)
  {
    printf("vfs:do_mapdriver: sys_vircopy failed: %d\n", r);
    return EINVAL;
  }

  label[label_len]= '\0';

  /* Ask DS which endpoint is registered under this label. */
  r= ds_retrieve_label_endpt(label, &endpoint);
  if (r != OK)
  {
    printf("vfs:do_mapdriver: ds doesn't know '%s'\n", label);
    return EINVAL;
  }

  /* Try to update device mapping. */
  major= m_in.md_major;
  flags= m_in.md_flags;
  r= map_driver(label, major, endpoint, m_in.md_style, flags);

  return(r);
}
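
/* For reference, a sketch of the request this handler consumes, using the
 * md_* message aliases read above (the label and major number below are
 * made-up examples, not values taken from this file):
 *
 *	message m;
 *	m.md_label = "fxp_0";               label registered with DS
 *	m.md_label_len = strlen("fxp_0");
 *	m.md_major = 7;                     major number to (re)map
 *	m.md_style = STYLE_DEV;
 *	m.md_flags = 0;
 *
 * RS sends such a request to VFS, where it is dispatched to do_mapdriver();
 * any other sender is rejected with EPERM above.
 */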

/*===========================================================================*
 *                                map_driver                                 *
 *===========================================================================*/
PUBLIC int map_driver(label, major, proc_nr_e, style, flags)
const char *label;		/* name of the driver */
int major;			/* major number of the device */
endpoint_t proc_nr_e;		/* process number of the driver */
int style;			/* style of the device */
int flags;			/* device flags */
{
/* Set a new device driver mapping in the dmap table.
 * If the proc_nr is set to NONE, we're supposed to unmap it.
 */
  int proc_nr_n;
  size_t len;
  struct dmap *dp;

  /* Get pointer to device entry in the dmap table. */
  if (major < 0 || major >= NR_DEVICES) return(ENODEV);
  dp = &dmap[major];

  /* Check if we're supposed to unmap it. */
  if(proc_nr_e == NONE) {
    dp->dmap_opcl = no_dev;
    dp->dmap_io = no_dev_io;
    dp->dmap_driver = NONE;
    dp->dmap_flags = flags;
    return(OK);
  }

  /* Check process number of new driver if requested. */
  if (! (flags & DRV_FORCED))
  {
    if (isokendpt(proc_nr_e, &proc_nr_n) != OK)
      return(EINVAL);
  }

  if (label != NULL) {
    len= strlen(label);
    if (len+1 > sizeof(dp->dmap_label))
      panic("map_driver: label too long: %d", (int) len);
    strcpy(dp->dmap_label, label);
  }

  /* Try to update the entry. */
  switch (style) {
  case STYLE_DEV:	/* synchronous general-purpose driver */
    dp->dmap_opcl = gen_opcl;
    dp->dmap_io = gen_io;
    break;
  case STYLE_DEVA:	/* asynchronous general-purpose driver */
    dp->dmap_opcl = gen_opcl;
    dp->dmap_io = asyn_io;
    break;
  case STYLE_TTY:	/* terminal driver */
    dp->dmap_opcl = tty_opcl;
    dp->dmap_io = gen_io;
    break;
  case STYLE_CTTY:	/* controlling terminal, /dev/tty */
    dp->dmap_opcl = ctty_opcl;
    dp->dmap_io = ctty_io;
    break;
  case STYLE_CLONE:	/* cloning device; open may return a new minor */
    dp->dmap_opcl = clone_opcl;
    dp->dmap_io = gen_io;
    break;
  default:
    return(EINVAL);
  }
  dp->dmap_driver = proc_nr_e;
  dp->dmap_flags = flags;
  dp->dmap_style = style;

  return(OK);
}
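
/* Two illustrative (hypothetical) invocations of map_driver(); the constants
 * are examples only, real callers pass values from <minix/dmap.h>, DS and RS:
 *
 *	map_driver("tty", TTY_MAJOR, TTY_PROC_NR, STYLE_TTY, 0);   install
 *	map_driver(NULL, TTY_MAJOR, NONE, 0, 0);                    unmap again
 */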

/*===========================================================================*
 *                            dmap_unmap_by_endpt                            *
 *===========================================================================*/
PUBLIC void dmap_unmap_by_endpt(int proc_nr_e)
{
  int i, r;

  for (i=0; i<NR_DEVICES; i++)
    if(dmap[i].dmap_driver && dmap[i].dmap_driver == proc_nr_e)
      if((r=map_driver(NULL, i, NONE, 0, 0)) != OK)
        printf("FS: unmap of p %d / d %d failed: %d\n", proc_nr_e, i, r);

  return;
}
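
/* Note: dmap_unmap_by_endpt() is the inverse of a successful mapping: every
 * major number still pointing at the given endpoint is remapped to NONE, so
 * the slot falls back to the no_dev/no_dev_io handlers. It is presumably
 * called from VFS's driver-exit handling; the call sites are not in this
 * file.
 */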

/*===========================================================================*
 *                                map_service                                *
 *===========================================================================*/
PUBLIC int map_service(struct rprocpub *rpub)
{
/* Map a new service by storing its device driver properties. */
  int r;

  /* Not a driver, nothing more to do. */
  if(!rpub->dev_nr) {
    return OK;
  }

  /* Map driver. */
  r = map_driver(rpub->label, rpub->dev_nr, rpub->endpoint,
    rpub->dev_style, rpub->dev_flags);
  if(r != OK) {
    return r;
  }

  /* If driver has two major numbers associated, also map the other one. */
  if(rpub->dev_style2 != STYLE_NDEV) {
    r = map_driver(rpub->label, rpub->dev_nr+1, rpub->endpoint,
      rpub->dev_style2, rpub->dev_flags);
    if(r != OK) {
      return r;
    }
  }

  return OK;
}
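
/* map_service() is the boot/RS-driven counterpart of do_mapdriver(): rather
 * than copying a label from a message, it takes the public process entry
 * (struct rprocpub) that RS maintains for a system service and, when that
 * entry describes a device driver (dev_nr != 0), installs one dmap entry for
 * it, plus a second one at dev_nr+1 when dev_style2 is set.
 */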

/*===========================================================================*
 *                                build_dmap                                 *
 *===========================================================================*/
PUBLIC void build_dmap()
{
/* Initialize the table with empty device <-> driver mappings. */
  int i;
  struct dmap dmap_default = DT_EMPTY;

  for (i=0; i<NR_DEVICES; i++) {
    dmap[i] = dmap_default;
  }
}

/*===========================================================================*
 *                            dmap_driver_match                              *
 *===========================================================================*/
PUBLIC int dmap_driver_match(endpoint_t proc, int major)
{
  if (major < 0 || major >= NR_DEVICES) return(0);
  if(dmap[major].dmap_driver != NONE && dmap[major].dmap_driver == proc)
    return 1;

  return 0;
}
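
/* A minimal usage sketch for dmap_driver_match(), with hypothetical variable
 * names, e.g. to verify that a reply really came from the driver mapped at a
 * given major device:
 *
 *	if (!dmap_driver_match(m.m_source, major)) {
 *		printf("VFS: spurious reply for major %d\n", major);
 *		return;
 *	}
 */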

/*===========================================================================*
 *                               dmap_endpt_up                               *
 *===========================================================================*/
PUBLIC void dmap_endpt_up(int proc_e)
{
  int i;

  for (i=0; i<NR_DEVICES; i++) {
    if(dmap[i].dmap_driver != NONE
        && dmap[i].dmap_driver == proc_e) {
      dev_up(i);
    }
  }

  return;
}
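
/* dmap_endpt_up() is the "driver up" hook: when VFS hears (through a DS
 * driver-up event) that a driver endpoint is alive, every major number mapped
 * to that endpoint gets a dev_up() call so the device layer can redo whatever
 * was pending on the previous driver instance. The DS subscription itself
 * lives elsewhere in VFS; this routine only scans the table.
 */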

/*===========================================================================*
 *                                 get_dmap                                  *
 *===========================================================================*/
PUBLIC struct dmap *get_dmap(endpoint_t proc_e)
{
/* See if the 'proc_e' endpoint belongs to a valid dmap entry. If so, return
 * a pointer to it.
 */
  int major;

  for (major = 0; major < NR_DEVICES; major++)
    if (dmap_driver_match(proc_e, major))
      return(&dmap[major]);

  return(NULL);
}