#ifndef __VFS_PROTO_H__
#define __VFS_PROTO_H__

/* Function prototypes. */

#include "timers.h"
#include "request.h"
#include "tll.h"
#include "threads.h"
#include <minix/rs.h>

/* Structs used in prototypes must be declared as such first. */
struct filp;
struct fproc;
struct vmnt;
struct vnode;
struct lookup;
struct worker_thread;
struct job;

typedef struct filp * filp_id_t;

/* comm.c */
int drv_sendrec(endpoint_t drv_e, message *reqm);
void fs_cancel(struct vmnt *vmp);
int fs_sendrec(endpoint_t fs_e, message *reqm);
void fs_sendmore(struct vmnt *vmp);
void send_work(void);

/* device.c */
int dev_open(dev_t dev, endpoint_t proc_e, int flags);
int dev_reopen(dev_t dev, int filp_no, int flags);
void dev_reply(struct dmap *dp);
int dev_close(dev_t dev, int filp_no);
int bdev_open(dev_t dev, int access);
int bdev_close(dev_t dev);
int dev_io(int op, dev_t dev, endpoint_t proc_e, void *buf, u64_t pos,
        size_t bytes, int flags, int suspend_reopen);
int gen_opcl(int op, dev_t dev, endpoint_t task_nr, int flags);
int gen_io(endpoint_t driver_e, message *mess_ptr);
int asyn_io(endpoint_t drv_e, message *mess_ptr);
int no_dev(int op, dev_t dev, int proc, int flags);
int no_dev_io(int, message *);
int tty_opcl(int op, dev_t dev, endpoint_t proc, int flags);
int ctty_opcl(int op, dev_t dev, endpoint_t proc, int flags);
int clone_opcl(int op, dev_t dev, int proc, int flags);
int ctty_io(int task_nr, message *mess_ptr);
int do_ioctl(void);
void pm_setsid(endpoint_t proc_e);
void dev_status(endpoint_t drv_e);
void bdev_up(int major);
void cdev_up(int major);
endpoint_t find_suspended_ep(endpoint_t driver, cp_grant_id_t g);
void reopen_reply(void);
void open_reply(void);

/* dmap.c */
void lock_dmap(struct dmap *dp);
void unlock_dmap(struct dmap *dp);
int do_mapdriver(void);
void init_dmap(void);
void init_dmap_locks(void);
int dmap_driver_match(endpoint_t proc, int major);
void dmap_endpt_up(int proc_nr, int is_blk);
void dmap_unmap_by_endpt(int proc_nr);
struct dmap *get_dmap(endpoint_t proc_e);
struct dmap *get_dmap_by_major(int major);
int map_service(struct rprocpub *rpub);
int map_driver(const char *label, int major, endpoint_t proc_nr, int
        dev_style, int flags);

/* elf_core_dump.c */
void write_elf_core_file(struct filp *f, int csig, char *exe_name);

/* exec.c */
int pm_exec(endpoint_t proc_e, vir_bytes path, size_t path_len, vir_bytes frame,
        size_t frame_len, vir_bytes *pc, vir_bytes *newsp, int flags);

/* filedes.c */
void *do_filp_gc(void *arg);
void check_filp_locks(void);
void check_filp_locks_by_me(void);
void init_filps(void);
struct filp *find_filp(struct vnode *vp, mode_t bits);
int get_fd(int start, mode_t bits, int *k, struct filp **fpt);
struct filp *get_filp(int fild, tll_access_t locktype);
struct filp *get_filp2(struct fproc *rfp, int fild, tll_access_t
        locktype);
void lock_filp(struct filp *filp, tll_access_t locktype);
void unlock_filp(struct filp *filp);
void unlock_filps(struct filp *filp1, struct filp *filp2);
int invalidate_filp(struct filp *);
void invalidate_filp_by_endpt(endpoint_t proc_e);
void invalidate_filp_by_char_major(int major);
int do_verify_fd(void);
int set_filp(filp_id_t sfilp);
int do_set_filp(void);
int copy_filp(endpoint_t to_ep, filp_id_t cfilp);
int do_copy_filp(void);
int put_filp(filp_id_t pfilp);
int do_put_filp(void);
int cancel_fd(endpoint_t ep, int fd);
int do_cancel_fd(void);
void close_filp(struct filp *fp);
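
/*
 * Illustrative usage sketch (not part of the original header): the common
 * pattern for the filp accessors above is to look up and lock a file
 * descriptor's filp, use it, and unlock it again.  The names 'fd' and
 * 'err_code', and the VNODE_READ lock level, are assumptions borrowed from
 * the rest of VFS, not definitions made here.
 *
 *      struct filp *f;
 *
 *      if ((f = get_filp(fd, VNODE_READ)) == NULL)
 *              return(err_code);
 *      ... operate on the locked filp ...
 *      unlock_filp(f);
 */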

/* fscall.c */
void nested_fs_call(message *m);

/* link.c */
int do_link(void);
int do_unlink(void);
int do_rename(void);
int do_truncate(void);
int do_ftruncate(void);
int truncate_vnode(struct vnode *vp, off_t newsize);
int rdlink_direct(char *orig_path, char *link_path, struct fproc *rfp);

/* lock.c */
int lock_op(struct filp *f, int req);
void lock_revive(void);

/* main.c */
int main(void);
void lock_proc(struct fproc *rfp, int force_lock);
void reply(endpoint_t whom, int result);
void thread_cleanup(struct fproc *rfp);
void unlock_proc(struct fproc *rfp);

/* misc.c */
void pm_exit(int proc);
int do_fcntl(void);
void pm_fork(int pproc, int cproc, int cpid);
void pm_setgid(int proc_e, int egid, int rgid);
void pm_setuid(int proc_e, int euid, int ruid);
void pm_setgroups(int proc_e, int ngroups, gid_t *addr);
int do_sync(void);
int do_fsync(void);
void pm_reboot(void);
int do_svrctl(void);
int do_getsysinfo(void);
int pm_dumpcore(endpoint_t proc_e, int sig, vir_bytes exe_name);
void * ds_event(void *arg);

/* mount.c */
int do_fsready(void);
int do_mount(void);
int do_umount(void);
int is_nonedev(dev_t dev);
void mount_pfs(void);
int mount_fs(dev_t dev, char mount_dev[PATH_MAX], char mount_path[PATH_MAX],
        endpoint_t fs_e, int rdonly, char mount_label[LABEL_MAX]);
int unmount(dev_t dev, char label[LABEL_MAX]);
void unmount_all(int force);

/* open.c */
int do_close(void);
int close_fd(struct fproc *rfp, int fd_nr);
void close_reply(void);
int common_open(char path[PATH_MAX], int oflags, mode_t omode);
int do_creat(void);
int do_lseek(void);
int do_llseek(void);
int do_mknod(void);
int do_mkdir(void);
int do_open(void);
int do_slink(void);
int do_vm_open(void);
int do_vm_close(void);

/* path.c */
struct vnode *advance(struct vnode *dirp, struct lookup *resolve, struct
        fproc *rfp);
struct vnode *eat_path(struct lookup *resolve, struct fproc *rfp);
struct vnode *last_dir(struct lookup *resolve, struct fproc *rfp);
void lookup_init(struct lookup *resolve, char *path, int flags, struct
        vmnt **vmp, struct vnode **vp);
int get_name(struct vnode *dirp, struct vnode *entry, char *_name);
int canonical_path(char *orig_path, struct fproc *rfp);
int do_check_perms(void);

/* pipe.c */
int do_pipe(void);
int do_pipe2(void);
int map_vnode(struct vnode *vp, endpoint_t fs_e);
void unpause(endpoint_t proc_e);
int pipe_check(struct filp *filp, int rw_flag, int oflags, int bytes,
        int notouch);
void release(struct vnode *vp, int op, int count);
void revive(endpoint_t proc_e, int returned);
void suspend(int why);
void pipe_suspend(struct filp *rfilp, char *buf, size_t size);
void unsuspend_by_endpt(endpoint_t proc_e);
void wait_for(endpoint_t proc_e);

/* protect.c */
int do_access(void);
int do_chmod(void);
int do_chown(void);
int do_umask(void);
int forbidden(struct fproc *rfp, struct vnode *vp, mode_t
        access_desired);
int read_only(struct vnode *vp);

/* read.c */
int do_read(void);
int do_getdents(void);
void lock_bsf(void);
void unlock_bsf(void);
void check_bsf_lock(void);
int do_read_write_peek(int rw_flag, int fd, char *buf, size_t bytes);
int read_write(int rw_flag, struct filp *f, char *buffer, size_t nbytes,
        endpoint_t for_e);
int rw_pipe(int rw_flag, endpoint_t usr, struct filp *f, char *buf,
        size_t req_size);

/* request.c */
int req_breadwrite(endpoint_t fs_e, endpoint_t user_e, dev_t dev, u64_t pos,
        unsigned int num_of_bytes, char *user_addr, int rw_flag,
        u64_t *new_posp, unsigned int *cum_iop);
int req_chmod(int fs_e, ino_t inode_nr, mode_t rmode, mode_t *new_modep);
int req_chown(endpoint_t fs_e, ino_t inode_nr, uid_t newuid, gid_t newgid,
        mode_t *new_modep);
int req_create(int fs_e, ino_t inode_nr, int omode, uid_t uid, gid_t gid,
        char *path, node_details_t *res);
int req_flush(endpoint_t fs_e, dev_t dev);
int req_fstatfs(endpoint_t fs_e, endpoint_t proc_e, vir_bytes buf);
int req_statvfs(endpoint_t fs_e, endpoint_t proc_e, vir_bytes buf);
int req_ftrunc(endpoint_t fs_e, ino_t inode_nr, off_t start, off_t end);
int req_getdents(endpoint_t fs_e, ino_t inode_nr, u64_t pos, char *buf,
        size_t size, u64_t *new_pos, int direct);
int req_inhibread(endpoint_t fs_e, ino_t inode_nr);
int req_link(endpoint_t fs_e, ino_t link_parent, char *lastc,
        ino_t linked_file);
int req_lookup(endpoint_t fs_e, ino_t dir_ino, ino_t root_ino, uid_t uid,
        gid_t gid, struct lookup *resolve, lookup_res_t *res,
        struct fproc *rfp);
int req_mkdir(endpoint_t fs_e, ino_t inode_nr, char *lastc, uid_t uid,
        gid_t gid, mode_t dmode);
int req_mknod(endpoint_t fs_e, ino_t inode_nr, char *lastc, uid_t uid,
        gid_t gid, mode_t dmode, dev_t dev);
int req_mountpoint(endpoint_t fs_e, ino_t inode_nr);
int req_newnode(endpoint_t fs_e, uid_t uid, gid_t gid, mode_t dmode, dev_t dev,
        struct node_details *res);
int req_putnode(int fs_e, ino_t inode_nr, int count);
int req_rdlink(endpoint_t fs_e, ino_t inode_nr, endpoint_t proc_e,
        vir_bytes buf, size_t len, int direct);
int req_readsuper(endpoint_t fs_e, char *driver_name, dev_t dev, int readonly,
        int isroot, struct node_details *res_nodep, int *con_reqs);
int req_readwrite(endpoint_t fs_e, ino_t inode_nr, u64_t pos, int rw_flag,
        endpoint_t user_e, char *user_addr, unsigned int num_of_bytes,
        u64_t *new_posp, unsigned int *cum_iop);
int req_rename(endpoint_t fs_e, ino_t old_dir, char *old_name, ino_t new_dir,
        char *new_name);
int req_rmdir(endpoint_t fs_e, ino_t inode_nr, char *lastc);
int req_slink(endpoint_t fs_e, ino_t inode_nr, char *lastc, endpoint_t proc_e,
        vir_bytes path_addr, size_t path_length, uid_t uid, gid_t gid);
int req_stat(endpoint_t fs_e, ino_t inode_nr, endpoint_t proc_e, vir_bytes buf);
int req_sync(endpoint_t fs_e);
int req_unlink(endpoint_t fs_e, ino_t inode_nr, char *lastc);
int req_unmount(endpoint_t fs_e);
int req_utime(endpoint_t fs_e, ino_t inode_nr, time_t actime, time_t modtime);
int req_newdriver(endpoint_t fs_e, dev_t dev, char *label);

/* stadir.c */
int do_chdir(void);
int do_fchdir(void);
int do_chroot(void);
int do_fstat(void);
int do_stat(void);
int do_fstatfs(void);
int do_statvfs(void);
int do_fstatvfs(void);
int do_rdlink(void);
int do_lstat(void);

/* time.c */
int do_utime(void);

/* tll.c */
void tll_downgrade(tll_t *tllp);
int tll_haspendinglock(tll_t *tllp);
void tll_init(tll_t *tllp);
int tll_islocked(tll_t *tllp);
int tll_lock(tll_t *tllp, tll_access_t locktype);
int tll_locked_by_me(tll_t *tllp);
void tll_lockstat(tll_t *tllp);
int tll_unlock(tll_t *tllp);
void tll_upgrade(tll_t *tllp);
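
/*
 * Illustrative usage sketch (not part of the original header), assuming the
 * TLL_READ/TLL_READSER/TLL_WRITE access levels used elsewhere in VFS: a
 * three-level lock is initialized once, locked at some access level,
 * optionally upgraded, and finally unlocked.
 *
 *      tll_t lock;
 *
 *      tll_init(&lock);
 *      tll_lock(&lock, TLL_READSER);   -- serializing read access
 *      tll_upgrade(&lock);             -- escalate to exclusive access
 *      tll_unlock(&lock);
 */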

/* utility.c */
time_t clock_time(void);
unsigned conv2(int norm, int w);
long conv4(int norm, long x);
int copy_name(size_t len, char *dest);
int fetch_name(vir_bytes path, size_t len, char *dest);
int no_sys(void);
int isokendpt_f(char *f, int l, endpoint_t e, int *p, int ft);
int in_group(struct fproc *rfp, gid_t grp);

#define okendpt(e, p) isokendpt_f(__FILE__, __LINE__, (e), (p), 1)
#define isokendpt(e, p) isokendpt_f(__FILE__, __LINE__, (e), (p), 0)
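
/*
 * Illustrative usage sketch (not part of the original header): both macros
 * map an endpoint to a process-table slot.  isokendpt() returns OK only for
 * a valid and consistent endpoint; okendpt() panics when it is not.  The
 * 'who_e' variable below is an assumption for the example.
 *
 *      struct fproc *rfp;
 *      int slot;
 *
 *      if (isokendpt(who_e, &slot) != OK)
 *              return(EINVAL);
 *      rfp = &fproc[slot];
 */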

/* vmnt.c */
void check_vmnt_locks(void);
void check_vmnt_locks_by_me(struct fproc *rfp);
void mark_vmnt_free(struct vmnt *vmp);
struct vmnt *get_free_vmnt(void);
struct vmnt *find_vmnt(endpoint_t fs_e);
struct vmnt *get_locked_vmnt(struct fproc *rfp);
void init_vmnts(void);
int lock_vmnt(struct vmnt *vp, tll_access_t locktype);
void unlock_vmnt(struct vmnt *vp);
void vmnt_unmap_by_endpt(endpoint_t proc_e);
void fetch_vmnt_paths(void);
void upgrade_vmnt_lock(struct vmnt *vmp);
void downgrade_vmnt_lock(struct vmnt *vmp);
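
/*
 * Illustrative usage sketch (not part of the original header): during path
 * resolution a vmnt is locked at VMNT_WRITE (or VMNT_EXCL) and downgraded to
 * VMNT_READ afterwards when only read access was actually requested, so that
 * a later upgrade cannot deadlock against concurrent readers.  The 'vmp'
 * pointer and the VMNT_* levels are assumptions borrowed from the rest of
 * VFS.
 *
 *      int r;
 *
 *      if ((r = lock_vmnt(vmp, VMNT_WRITE)) != OK)
 *              return(r);
 *      ... resolve the path ...
 *      downgrade_vmnt_lock(vmp);       -- keep only VMNT_READ
 *      ...
 *      unlock_vmnt(vmp);
 */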

/* vnode.c */
void check_vnode_locks(void);
void check_vnode_locks_by_me(struct fproc *rfp);
struct vnode *get_free_vnode(void);
struct vnode *find_vnode(int fs_e, ino_t inode);
void init_vnodes(void);
int is_vnode_locked(struct vnode *vp);
int lock_vnode(struct vnode *vp, tll_access_t locktype);
void unlock_vnode(struct vnode *vp);
void dup_vnode(struct vnode *vp);
void put_vnode(struct vnode *vp);
void vnode_clean_refs(struct vnode *vp);
void upgrade_vnode_lock(struct vnode *vp);

/* write.c */
int do_write(void);

/* gcov.c */
int do_gcov_flush(void);
#if ! USE_COVERAGE
#define do_gcov_flush no_sys
#endif

/* select.c */
int do_select(void);
void init_select(void);
void select_callback(struct filp *, int ops);
void select_forget(endpoint_t proc_e);
void select_reply1(endpoint_t driver_e, int minor, int status);
void select_reply2(endpoint_t driver_e, int minor, int status);
void select_timeout_check(timer_t *);
void select_unsuspend_by_endpt(endpoint_t proc);

/* worker.c */
int worker_available(void);
struct worker_thread *worker_get(thread_t worker_tid);
struct job *worker_getjob(thread_t worker_tid);
void worker_init(struct worker_thread *worker);
void worker_signal(struct worker_thread *worker);
void worker_start(void *(*func)(void *arg));
void worker_stop(struct worker_thread *worker);
void worker_stop_by_endpt(endpoint_t proc_e);
void worker_wait(void);
void sys_worker_start(void *(*func)(void *arg));
void dl_worker_start(void *(*func)(void *arg));

#endif