2012-02-13 16:28:04 +01:00
|
|
|
#ifndef __VFS_PROTO_H__
|
|
|
|
#define __VFS_PROTO_H__
|
|
|
|
|
2005-04-21 16:53:53 +02:00
|
|
|
/* Function prototypes. */
|
|
|
|
|
2005-06-17 15:41:12 +02:00
|
|
|
#include "timers.h"
|
2006-10-25 15:40:36 +02:00
|
|
|
#include "request.h"
|
2012-02-13 16:28:04 +01:00
|
|
|
#include "tll.h"
|
|
|
|
#include "threads.h"
|
2010-04-09 23:56:44 +02:00
|
|
|
#include <minix/rs.h>
|
2006-06-20 12:12:09 +02:00
|
|
|
|
2005-04-21 16:53:53 +02:00
|
|
|
/* Structs used in prototypes must be declared as such first. */
|
2012-02-13 16:28:04 +01:00
|
|
|
struct filp;
|
2006-05-11 16:57:23 +02:00
|
|
|
struct fproc;
|
2011-12-21 23:29:29 +01:00
|
|
|
struct timespec;
|
2006-10-25 15:40:36 +02:00
|
|
|
struct vmnt;
|
|
|
|
struct vnode;
|
2012-02-13 16:28:04 +01:00
|
|
|
struct lookup;
|
|
|
|
struct worker_thread;
|
|
|
|
struct job;
|
2005-04-21 16:53:53 +02:00
|
|
|
|
2010-08-30 15:44:07 +02:00
|
|
|
typedef struct filp * filp_id_t;
|
|
|
|
|
2012-02-13 16:28:04 +01:00
|
|
|
/* comm.c */
|
VFS: make all IPC asynchronous
By decoupling synchronous drivers from VFS, we are a big step closer to
supporting driver crashes under all circumstances. That is, VFS can't
become stuck on IPC with a synchronous driver (e.g., INET) and can
recover from crashing block drivers during open/close/ioctl or during
communication with an FS.
In order to maintain serialized communication with a synchronous driver,
the communication is wrapped by a mutex on a per driver basis (not major
numbers as there can be multiple majors with identical endpoints). Majors
that share a driver endpoint point to a single mutex object.
In order to support crashes from block drivers, the file reopen tactic
had to be changed; first reopen files associated with the crashed
driver, then send the new driver endpoint to FSes. This solves a
deadlock between the FS and the block driver;
- VFS would send REQ_NEW_DRIVER to an FS, but the FS only receives it
after retrying the current request to the newly started driver.
- The block driver would refuse the retried request until all files
had been reopened.
- VFS would reopen files only after getting a reply from the initial
REQ_NEW_DRIVER.
When a character special driver crashes, all associated files have to
be marked invalid and closed (or reopened if flagged as such). However,
they can only be closed if a thread holds exclusive access to it. To
obtain exclusive access, the worker thread (which handles the new driver
endpoint event from DS) schedules a new job to garbage collect invalid
files. This way, we can signal the worker thread that was talking to the
crashed driver and will release exclusive access to a file associated
with the crashed driver and prevent the garbage collecting worker thread
from deadlocking on that file.
Also, when a character special driver crashes, RS will unmap the driver
and remap it upon restart. During unmapping, associated files are marked
invalid instead of waiting for an endpoint up event from DS, as that
event might come later than new read/write/select requests and thus
cause confusion in the freshly started driver.
When locking a filp, the usage counters are no longer checked. The usage
counter can legally go down to zero during filp invalidation while there
are locks pending.
DS events are handled by a separate worker thread instead of the main
thread as reopening files could lead to another crash and a stuck thread.
An additional worker thread is then necessary to unlock it.
Finally, with everything asynchronous a race condition in do_select
surfaced. A select entry was only marked in use after successfully sending
initial select requests to drivers and having to wait. When multiple
select() calls were handled there was opportunity that these entries
were overwritten. This had the effect that some select results were
ignored (and select() remained blocking instead of returning) or do_select
tried to access filps that were not present (because thrown away by
secondary select()). This bug manifested itself with sendrecs, but was
very hard to reproduce. However, it became awfully easy to trigger with
asynsends only.
2012-08-28 16:06:51 +02:00
|
|
|
int drv_sendrec(endpoint_t drv_e, message *reqm);
|
2012-03-24 16:16:34 +01:00
|
|
|
void fs_cancel(struct vmnt *vmp);
|
|
|
|
int fs_sendrec(endpoint_t fs_e, message *reqm);
|
|
|
|
void fs_sendmore(struct vmnt *vmp);
|
|
|
|
void send_work(void);
|
2012-02-13 16:28:04 +01:00
|
|
|
|
2005-04-21 16:53:53 +02:00
|
|
|
/* device.c */
|
2012-03-24 16:16:34 +01:00
|
|
|
int dev_open(dev_t dev, endpoint_t proc_e, int flags);
|
|
|
|
int dev_reopen(dev_t dev, int filp_no, int flags);
|
|
|
|
int dev_close(dev_t dev, int filp_no);
|
2013-08-30 13:42:51 +02:00
|
|
|
void cdev_reply(void);
|
2012-03-24 16:16:34 +01:00
|
|
|
int bdev_open(dev_t dev, int access);
|
|
|
|
int bdev_close(dev_t dev);
|
2013-08-30 13:42:51 +02:00
|
|
|
void bdev_reply(struct dmap *dp);
|
2013-03-25 17:08:04 +01:00
|
|
|
int dev_io(int op, dev_t dev, endpoint_t proc_e, void *buf, off_t pos,
|
2012-03-24 16:16:34 +01:00
|
|
|
size_t bytes, int flags, int suspend_reopen);
|
|
|
|
int gen_opcl(int op, dev_t dev, endpoint_t task_nr, int flags);
|
2013-08-30 13:33:56 +02:00
|
|
|
int gen_io(endpoint_t drv_e, message *mess_ptr);
|
2013-04-23 01:50:45 +02:00
|
|
|
int no_dev(int op, dev_t dev, endpoint_t proc, int flags);
|
|
|
|
int no_dev_io(endpoint_t, message *);
|
2012-03-24 16:16:34 +01:00
|
|
|
int tty_opcl(int op, dev_t dev, endpoint_t proc, int flags);
|
|
|
|
int ctty_opcl(int op, dev_t dev, endpoint_t proc, int flags);
|
2013-04-23 01:50:45 +02:00
|
|
|
int clone_opcl(int op, dev_t dev, endpoint_t proc, int flags);
|
|
|
|
int ctty_io(endpoint_t task_nr, message *mess_ptr);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_ioctl(message *m_out);
|
2013-08-30 13:33:56 +02:00
|
|
|
int dev_select(dev_t dev, int ops);
|
|
|
|
int dev_cancel(dev_t dev);
|
2012-04-13 14:50:38 +02:00
|
|
|
void pm_setsid(endpoint_t proc_e);
|
2012-03-24 16:16:34 +01:00
|
|
|
void bdev_up(int major);
|
|
|
|
void cdev_up(int major);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
/* dmap.c */
|
VFS: make all IPC asynchronous
By decoupling synchronous drivers from VFS, we are a big step closer to
supporting driver crashes under all circumstances. That is, VFS can't
become stuck on IPC with a synchronous driver (e.g., INET) and can
recover from crashing block drivers during open/close/ioctl or during
communication with an FS.
In order to maintain serialized communication with a synchronous driver,
the communication is wrapped by a mutex on a per driver basis (not major
numbers as there can be multiple majors with identical endpoints). Majors
that share a driver endpoint point to a single mutex object.
In order to support crashes from block drivers, the file reopen tactic
had to be changed; first reopen files associated with the crashed
driver, then send the new driver endpoint to FSes. This solves a
deadlock between the FS and the block driver;
- VFS would send REQ_NEW_DRIVER to an FS, but the FS only receives it
after retrying the current request to the newly started driver.
- The block driver would refuse the retried request until all files
had been reopened.
- VFS would reopen files only after getting a reply from the initial
REQ_NEW_DRIVER.
When a character special driver crashes, all associated files have to
be marked invalid and closed (or reopened if flagged as such). However,
they can only be closed if a thread holds exclusive access to it. To
obtain exclusive access, the worker thread (which handles the new driver
endpoint event from DS) schedules a new job to garbage collect invalid
files. This way, we can signal the worker thread that was talking to the
crashed driver and will release exclusive access to a file associated
with the crashed driver and prevent the garbage collecting worker thread
from deadlocking on that file.
Also, when a character special driver crashes, RS will unmap the driver
and remap it upon restart. During unmapping, associated files are marked
invalid instead of waiting for an endpoint up event from DS, as that
event might come later than new read/write/select requests and thus
cause confusion in the freshly started driver.
When locking a filp, the usage counters are no longer checked. The usage
counter can legally go down to zero during filp invalidation while there
are locks pending.
DS events are handled by a separate worker thread instead of the main
thread as reopening files could lead to another crash and a stuck thread.
An additional worker thread is then necessary to unlock it.
Finally, with everything asynchronous a race condition in do_select
surfaced. A select entry was only marked in use after successfully sending
initial select requests to drivers and having to wait. When multiple
select() calls were handled there was opportunity that these entries
were overwritten. This had the effect that some select results were
ignored (and select() remained blocking instead of returning) or do_select
tried to access filps that were not present (because thrown away by
secondary select()). This bug manifested itself with sendrecs, but was
very hard to reproduce. However, it became awfully easy to trigger with
asynsends only.
2012-08-28 16:06:51 +02:00
|
|
|
void lock_dmap(struct dmap *dp);
|
|
|
|
void unlock_dmap(struct dmap *dp);
|
2012-03-24 16:16:34 +01:00
|
|
|
int do_mapdriver(void);
|
|
|
|
void init_dmap(void);
|
VFS: make all IPC asynchronous
By decoupling synchronous drivers from VFS, we are a big step closer to
supporting driver crashes under all circumstances. That is, VFS can't
become stuck on IPC with a synchronous driver (e.g., INET) and can
recover from crashing block drivers during open/close/ioctl or during
communication with an FS.
In order to maintain serialized communication with a synchronous driver,
the communication is wrapped by a mutex on a per driver basis (not major
numbers as there can be multiple majors with identical endpoints). Majors
that share a driver endpoint point to a single mutex object.
In order to support crashes from block drivers, the file reopen tactic
had to be changed; first reopen files associated with the crashed
driver, then send the new driver endpoint to FSes. This solves a
deadlock between the FS and the block driver;
- VFS would send REQ_NEW_DRIVER to an FS, but the FS only receives it
after retrying the current request to the newly started driver.
- The block driver would refuse the retried request until all files
had been reopened.
- VFS would reopen files only after getting a reply from the initial
REQ_NEW_DRIVER.
When a character special driver crashes, all associated files have to
be marked invalid and closed (or reopened if flagged as such). However,
they can only be closed if a thread holds exclusive access to it. To
obtain exclusive access, the worker thread (which handles the new driver
endpoint event from DS) schedules a new job to garbage collect invalid
files. This way, we can signal the worker thread that was talking to the
crashed driver and will release exclusive access to a file associated
with the crashed driver and prevent the garbage collecting worker thread
from deadlocking on that file.
Also, when a character special driver crashes, RS will unmap the driver
and remap it upon restart. During unmapping, associated files are marked
invalid instead of waiting for an endpoint up event from DS, as that
event might come later than new read/write/select requests and thus
cause confusion in the freshly started driver.
When locking a filp, the usage counters are no longer checked. The usage
counter can legally go down to zero during filp invalidation while there
are locks pending.
DS events are handled by a separate worker thread instead of the main
thread as reopening files could lead to another crash and a stuck thread.
An additional worker thread is then necessary to unlock it.
Finally, with everything asynchronous a race condition in do_select
surfaced. A select entry was only marked in use after successfully sending
initial select requests to drivers and having to wait. When multiple
select() calls were handled there was opportunity that these entries
were overwritten. This had the effect that some select results were
ignored (and select() remained blocking instead of returning) or do_select
tried to access filps that were not present (because thrown away by
secondary select()). This bug manifested itself with sendrecs, but was
very hard to reproduce. However, it became awfully easy to trigger with
asynsends only.
2012-08-28 16:06:51 +02:00
|
|
|
void init_dmap_locks(void);
|
2012-03-24 16:16:34 +01:00
|
|
|
int dmap_driver_match(endpoint_t proc, int major);
|
2013-04-23 01:50:45 +02:00
|
|
|
void dmap_endpt_up(endpoint_t proc_nr, int is_blk);
|
|
|
|
void dmap_unmap_by_endpt(endpoint_t proc_nr);
|
2012-03-24 16:16:34 +01:00
|
|
|
struct dmap *get_dmap(endpoint_t proc_e);
|
VFS: make all IPC asynchronous
By decoupling synchronous drivers from VFS, we are a big step closer to
supporting driver crashes under all circumstances. That is, VFS can't
become stuck on IPC with a synchronous driver (e.g., INET) and can
recover from crashing block drivers during open/close/ioctl or during
communication with an FS.
In order to maintain serialized communication with a synchronous driver,
the communication is wrapped by a mutex on a per driver basis (not major
numbers as there can be multiple majors with identical endpoints). Majors
that share a driver endpoint point to a single mutex object.
In order to support crashes from block drivers, the file reopen tactic
had to be changed; first reopen files associated with the crashed
driver, then send the new driver endpoint to FSes. This solves a
deadlock between the FS and the block driver;
- VFS would send REQ_NEW_DRIVER to an FS, but the FS only receives it
after retrying the current request to the newly started driver.
- The block driver would refuse the retried request until all files
had been reopened.
- VFS would reopen files only after getting a reply from the initial
REQ_NEW_DRIVER.
When a character special driver crashes, all associated files have to
be marked invalid and closed (or reopened if flagged as such). However,
they can only be closed if a thread holds exclusive access to it. To
obtain exclusive access, the worker thread (which handles the new driver
endpoint event from DS) schedules a new job to garbage collect invalid
files. This way, we can signal the worker thread that was talking to the
crashed driver and will release exclusive access to a file associated
with the crashed driver and prevent the garbage collecting worker thread
from deadlocking on that file.
Also, when a character special driver crashes, RS will unmap the driver
and remap it upon restart. During unmapping, associated files are marked
invalid instead of waiting for an endpoint up event from DS, as that
event might come later than new read/write/select requests and thus
cause confusion in the freshly started driver.
When locking a filp, the usage counters are no longer checked. The usage
counter can legally go down to zero during filp invalidation while there
are locks pending.
DS events are handled by a separate worker thread instead of the main
thread as reopening files could lead to another crash and a stuck thread.
An additional worker thread is then necessary to unlock it.
Finally, with everything asynchronous a race condition in do_select
surfaced. A select entry was only marked in use after successfully sending
initial select requests to drivers and having to wait. When multiple
select() calls were handled there was opportunity that these entries
were overwritten. This had the effect that some select results were
ignored (and select() remained blocking instead of returning) or do_select
tried to access filps that were not present (because thrown away by
secondary select()). This bug manifested itself with sendrecs, but was
very hard to reproduce. However, it became awfully easy to trigger with
asynsends only.
2012-08-28 16:06:51 +02:00
|
|
|
struct dmap *get_dmap_by_major(int major);
|
2012-03-24 16:16:34 +01:00
|
|
|
int do_mapdriver(void);
|
|
|
|
int map_service(struct rprocpub *rpub);
|
2013-04-23 01:50:45 +02:00
|
|
|
void dmap_unmap_by_endpt(endpoint_t proc_nr);
|
2012-03-24 16:16:34 +01:00
|
|
|
int map_driver(const char *label, int major, endpoint_t proc_nr, int
|
|
|
|
dev_style, int flags);
|
|
|
|
int map_service(struct rprocpub *rpub);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
2012-02-13 16:28:04 +01:00
|
|
|
/* elf_core_dump.c */
|
2012-03-24 16:16:34 +01:00
|
|
|
void write_elf_core_file(struct filp *f, int csig, char *exe_name);
|
2012-02-13 16:28:04 +01:00
|
|
|
|
2006-05-11 16:57:23 +02:00
|
|
|
/* exec.c */
|
2013-08-30 14:00:50 +02:00
|
|
|
int pm_exec(vir_bytes path, size_t path_len, vir_bytes frame, size_t frame_len,
|
|
|
|
vir_bytes *pc, vir_bytes *newsp, vir_bytes *ps_str, int flags);
|
2006-05-11 16:57:23 +02:00
|
|
|
|
2005-04-21 16:53:53 +02:00
|
|
|
/* filedes.c */
|
2013-08-30 14:00:50 +02:00
|
|
|
int do_filp_gc(void);
|
2012-03-24 16:16:34 +01:00
|
|
|
void check_filp_locks(void);
|
|
|
|
void check_filp_locks_by_me(void);
|
|
|
|
void init_filps(void);
|
|
|
|
struct filp *find_filp(struct vnode *vp, mode_t bits);
|
2013-05-07 14:41:07 +02:00
|
|
|
int get_fd(struct fproc *rfp, int start, mode_t bits, int *k,
|
|
|
|
struct filp **fpt);
|
2012-03-24 16:16:34 +01:00
|
|
|
struct filp *get_filp(int fild, tll_access_t locktype);
|
2013-05-07 14:41:07 +02:00
|
|
|
struct filp *get_filp2(struct fproc *rfp, int fild, tll_access_t locktype);
|
2012-03-24 16:16:34 +01:00
|
|
|
void lock_filp(struct filp *filp, tll_access_t locktype);
|
|
|
|
void unlock_filp(struct filp *filp);
|
|
|
|
void unlock_filps(struct filp *filp1, struct filp *filp2);
|
|
|
|
int invalidate_filp(struct filp *);
|
|
|
|
void invalidate_filp_by_endpt(endpoint_t proc_e);
|
VFS: make all IPC asynchronous
By decoupling synchronous drivers from VFS, we are a big step closer to
supporting driver crashes under all circumstances. That is, VFS can't
become stuck on IPC with a synchronous driver (e.g., INET) and can
recover from crashing block drivers during open/close/ioctl or during
communication with an FS.
In order to maintain serialized communication with a synchronous driver,
the communication is wrapped by a mutex on a per driver basis (not major
numbers as there can be multiple majors with identical endpoints). Majors
that share a driver endpoint point to a single mutex object.
In order to support crashes from block drivers, the file reopen tactic
had to be changed; first reopen files associated with the crashed
driver, then send the new driver endpoint to FSes. This solves a
deadlock between the FS and the block driver;
- VFS would send REQ_NEW_DRIVER to an FS, but the FS only receives it
after retrying the current request to the newly started driver.
- The block driver would refuse the retried request until all files
had been reopened.
- VFS would reopen files only after getting a reply from the initial
REQ_NEW_DRIVER.
When a character special driver crashes, all associated files have to
be marked invalid and closed (or reopened if flagged as such). However,
they can only be closed if a thread holds exclusive access to it. To
obtain exclusive access, the worker thread (which handles the new driver
endpoint event from DS) schedules a new job to garbage collect invalid
files. This way, we can signal the worker thread that was talking to the
crashed driver and will release exclusive access to a file associated
with the crashed driver and prevent the garbage collecting worker thread
from deadlocking on that file.
Also, when a character special driver crashes, RS will unmap the driver
and remap it upon restart. During unmapping, associated files are marked
invalid instead of waiting for an endpoint up event from DS, as that
event might come later than new read/write/select requests and thus
cause confusion in the freshly started driver.
When locking a filp, the usage counters are no longer checked. The usage
counter can legally go down to zero during filp invalidation while there
are locks pending.
DS events are handled by a separate worker thread instead of the main
thread as reopening files could lead to another crash and a stuck thread.
An additional worker thread is then necessary to unlock it.
Finally, with everything asynchronous a race condition in do_select
surfaced. A select entry was only marked in use after successfully sending
initial select requests to drivers and having to wait. When multiple
select() calls were handled there was opportunity that these entries
were overwritten. This had the effect that some select results were
ignored (and select() remained blocking instead of returning) or do_select
tried to access filps that were not present (because thrown away by
secondary select()). This bug manifested itself with sendrecs, but was
very hard to reproduce. However, it became awfully easy to trigger with
asynsends only.
2012-08-28 16:06:51 +02:00
|
|
|
void invalidate_filp_by_char_major(int major);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_verify_fd(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
int set_filp(filp_id_t sfilp);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_set_filp(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
int copy_filp(endpoint_t to_ep, filp_id_t cfilp);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_copy_filp(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
int put_filp(filp_id_t pfilp);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_put_filp(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
int cancel_fd(endpoint_t ep, int fd);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_cancel_fd(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
void close_filp(struct filp *fp);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
2009-05-11 12:02:28 +02:00
|
|
|
/* fscall.c */
|
2012-03-24 16:16:34 +01:00
|
|
|
void nested_fs_call(message *m);
|
2009-05-11 12:02:28 +02:00
|
|
|
|
2005-04-21 16:53:53 +02:00
|
|
|
/* link.c */
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_link(message *m_out);
|
|
|
|
int do_unlink(message *m_out);
|
|
|
|
int do_rename(message *m_out);
|
|
|
|
int do_truncate(message *m_out);
|
|
|
|
int do_ftruncate(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
int truncate_vnode(struct vnode *vp, off_t newsize);
|
|
|
|
int rdlink_direct(char *orig_path, char *link_path, struct fproc *rfp);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
/* lock.c */
|
2012-03-24 16:16:34 +01:00
|
|
|
int lock_op(struct filp *f, int req);
|
|
|
|
void lock_revive(void);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
/* main.c */
|
2012-03-24 16:16:34 +01:00
|
|
|
int main(void);
|
2013-08-30 14:00:50 +02:00
|
|
|
void lock_proc(struct fproc *rfp);
|
|
|
|
void unlock_proc(struct fproc *rfp);
|
2013-04-12 18:41:23 +02:00
|
|
|
void reply(message *m_out, endpoint_t whom, int result);
|
|
|
|
void replycode(endpoint_t whom, int result);
|
2013-08-30 14:00:50 +02:00
|
|
|
void service_pm_postponed(void);
|
|
|
|
void thread_cleanup(void);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
/* misc.c */
|
2013-08-30 14:00:50 +02:00
|
|
|
void pm_exit(void);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_fcntl(message *m_out);
|
2013-04-23 01:50:45 +02:00
|
|
|
void pm_fork(endpoint_t pproc, endpoint_t cproc, pid_t cpid);
|
|
|
|
void pm_setgid(endpoint_t proc_e, int egid, int rgid);
|
|
|
|
void pm_setuid(endpoint_t proc_e, int euid, int ruid);
|
|
|
|
void pm_setgroups(endpoint_t proc_e, int ngroups, gid_t *addr);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_sync(message *m_out);
|
|
|
|
int do_fsync(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
void pm_reboot(void);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_svrctl(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
int do_getsysinfo(void);
|
2013-05-07 14:41:07 +02:00
|
|
|
int do_vm_call(message *m_out);
|
2013-08-30 14:00:50 +02:00
|
|
|
int pm_dumpcore(int sig, vir_bytes exe_name);
|
|
|
|
void ds_event(void);
|
2013-05-07 14:41:07 +02:00
|
|
|
int dupvm(struct fproc *fp, int pfd, int *vmfd, struct filp **f);
|
2013-06-25 14:41:01 +02:00
|
|
|
int do_getrusage(message *m_out);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
/* mount.c */
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_fsready(message *m_out);
|
|
|
|
int do_mount(message *m_out);
|
|
|
|
int do_umount(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
int is_nonedev(dev_t dev);
|
|
|
|
void mount_pfs(void);
|
2012-11-22 23:00:00 +01:00
|
|
|
int mount_fs(dev_t dev, char mount_dev[PATH_MAX], char mount_path[PATH_MAX],
|
2013-08-20 01:37:18 +02:00
|
|
|
endpoint_t fs_e, int rdonly, char mount_type[FSTYPE_MAX],
|
|
|
|
char mount_label[LABEL_MAX]);
|
2012-07-13 18:08:06 +02:00
|
|
|
int unmount(dev_t dev, char label[LABEL_MAX]);
|
2012-11-14 14:18:16 +01:00
|
|
|
void unmount_all(int force);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
/* open.c */
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_close(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
int close_fd(struct fproc *rfp, int fd_nr);
|
|
|
|
int common_open(char path[PATH_MAX], int oflags, mode_t omode);
|
|
|
|
int do_creat(void);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_lseek(message *m_out);
|
|
|
|
int do_mknod(message *m_out);
|
|
|
|
int do_mkdir(message *m_out);
|
|
|
|
int do_open(message *m_out);
|
|
|
|
int do_slink(message *m_out);
|
2013-05-07 14:41:07 +02:00
|
|
|
int actual_llseek(struct fproc *rfp, message *m_out, int seekfd,
|
2013-03-25 17:08:04 +01:00
|
|
|
int seekwhence, off_t offset);
|
2012-03-24 16:16:34 +01:00
|
|
|
int do_vm_open(void);
|
|
|
|
int do_vm_close(void);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
/* path.c */
|
2012-03-24 16:16:34 +01:00
|
|
|
struct vnode *advance(struct vnode *dirp, struct lookup *resolve, struct
|
|
|
|
fproc *rfp);
|
|
|
|
struct vnode *eat_path(struct lookup *resolve, struct fproc *rfp);
|
|
|
|
struct vnode *last_dir(struct lookup *resolve, struct fproc *rfp);
|
|
|
|
void lookup_init(struct lookup *resolve, char *path, int flags, struct
|
|
|
|
vmnt **vmp, struct vnode **vp);
|
|
|
|
int get_name(struct vnode *dirp, struct vnode *entry, char *_name);
|
|
|
|
int canonical_path(char *orig_path, struct fproc *rfp);
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_check_perms(message *m_out);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
/* pipe.c */
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_pipe(message *m_out);
|
|
|
|
int do_pipe2(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
int map_vnode(struct vnode *vp, endpoint_t fs_e);
|
2013-08-30 14:00:50 +02:00
|
|
|
void unpause(void);
|
2013-02-25 12:36:29 +01:00
|
|
|
int pipe_check(struct filp *filp, int rw_flag, int oflags, int bytes,
|
2012-12-11 20:46:09 +01:00
|
|
|
int notouch);
|
2012-04-13 14:50:38 +02:00
|
|
|
void release(struct vnode *vp, int op, int count);
|
|
|
|
void revive(endpoint_t proc_e, int returned);
|
|
|
|
void suspend(int why);
|
2012-03-24 16:16:34 +01:00
|
|
|
void pipe_suspend(struct filp *rfilp, char *buf, size_t size);
|
2012-04-13 14:50:38 +02:00
|
|
|
void unsuspend_by_endpt(endpoint_t proc_e);
|
|
|
|
void wait_for(endpoint_t proc_e);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
/* protect.c */
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_access(message *m_out);
|
|
|
|
int do_chmod(message *m_out);
|
|
|
|
int do_chown(message *m_out);
|
|
|
|
int do_umask(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
int forbidden(struct fproc *rfp, struct vnode *vp, mode_t
|
|
|
|
access_desired);
|
|
|
|
int read_only(struct vnode *vp);
|
2005-04-21 16:53:53 +02:00
|
|
|
|
|
|
|
/* read.c */
|
2013-04-12 18:41:23 +02:00
|
|
|
int do_read(message *m_out);
|
|
|
|
int do_getdents(message *m_out);
|
2012-03-24 16:16:34 +01:00
|
|
|
void lock_bsf(void);
|
|
|
|
void unlock_bsf(void);
|
2012-09-27 22:23:49 +02:00
|
|
|
void check_bsf_lock(void);
|
2013-01-13 22:44:38 +01:00
|
|
|
int do_read_write_peek(int rw_flag, int fd, char *buf, size_t bytes);
|
2013-05-07 14:41:07 +02:00
|
|
|
int actual_read_write_peek(struct fproc *rfp, int rw_flag, int fd, char *buf,
|
|
|
|
size_t bytes);
|
|
|
|
int read_write(struct fproc *rfp, int rw_flag, struct filp *f, char *buffer,
|
|
|
|
size_t nbytes, endpoint_t for_e);
|
2012-03-24 16:16:34 +01:00
|
|
|
int rw_pipe(int rw_flag, endpoint_t usr, struct filp *f, char *buf,
|
|
|
|
size_t req_size);
|
2006-10-25 15:40:36 +02:00
|
|
|
|
|
|
|
/* request.c */
|
2013-03-25 17:08:04 +01:00
|
|
|
int req_breadwrite(endpoint_t fs_e, endpoint_t user_e, dev_t dev, off_t pos,
|
2013-03-07 16:55:22 +01:00
|
|
|
unsigned int num_of_bytes, vir_bytes user_addr, int rw_flag,
|
2013-03-25 17:08:04 +01:00
|
|
|
off_t *new_posp, unsigned int *cum_iop);
|
2013-03-25 22:09:10 +01:00
|
|
|
int req_chmod(endpoint_t fs_e, ino_t inode_nr, mode_t rmode,
|
|
|
|
mode_t *new_modep);
|
2012-04-13 14:50:38 +02:00
|
|
|
int req_chown(endpoint_t fs_e, ino_t inode_nr, uid_t newuid, gid_t newgid,
|
|
|
|
mode_t *new_modep);
|
2013-03-25 22:09:10 +01:00
|
|
|
int req_create(endpoint_t fs_e, ino_t inode_nr, int omode, uid_t uid,
|
|
|
|
gid_t gid, char *path, node_details_t *res);
|
2012-03-24 16:16:34 +01:00
|
|
|
int req_flush(endpoint_t fs_e, dev_t dev);
|
2013-08-20 01:35:35 +02:00
|
|
|
int req_statvfs(endpoint_t fs_e, struct statvfs *buf);
|
2012-03-24 16:16:34 +01:00
|
|
|
int req_ftrunc(endpoint_t fs_e, ino_t inode_nr, off_t start, off_t end);
|
2013-03-25 17:08:04 +01:00
|
|
|
int req_getdents(endpoint_t fs_e, ino_t inode_nr, off_t pos, char *buf,
|
2013-08-31 21:48:15 +02:00
|
|
|
size_t size, off_t *new_pos, int direct);
|
2012-03-24 16:16:34 +01:00
|
|
|
int req_inhibread(endpoint_t fs_e, ino_t inode_nr);
|
2012-04-13 14:50:38 +02:00
|
|
|
int req_link(endpoint_t fs_e, ino_t link_parent, char *lastc,
|
|
|
|
ino_t linked_file);
|
|
|
|
int req_lookup(endpoint_t fs_e, ino_t dir_ino, ino_t root_ino, uid_t uid,
|
|
|
|
gid_t gid, struct lookup *resolve, lookup_res_t *res,
|
|
|
|
struct fproc *rfp);
|
int req_mkdir(endpoint_t fs_e, ino_t inode_nr, char *lastc, uid_t uid,
	gid_t gid, mode_t dmode);
int req_mknod(endpoint_t fs_e, ino_t inode_nr, char *lastc, uid_t uid,
	gid_t gid, mode_t dmode, dev_t dev);
int req_mountpoint(endpoint_t fs_e, ino_t inode_nr);
int req_newnode(endpoint_t fs_e, uid_t uid, gid_t gid, mode_t dmode,
	dev_t dev, struct node_details *res);
int req_putnode(int fs_e, ino_t inode_nr, int count);
int req_rdlink(endpoint_t fs_e, ino_t inode_nr, endpoint_t proc_e,
	vir_bytes buf, size_t len, int direct);
int req_readsuper(struct vmnt *vmp, char *driver_name, dev_t dev, int readonly,
	int isroot, struct node_details *res_nodep, unsigned int *fs_flags);
int req_readwrite(endpoint_t fs_e, ino_t inode_nr, off_t pos, int rw_flag,
	endpoint_t user_e, vir_bytes user_addr, unsigned int num_of_bytes,
	off_t *new_posp, unsigned int *cum_iop);
int req_bpeek(endpoint_t fs_e, dev_t dev, off_t pos, unsigned int num_of_bytes);
int req_peek(endpoint_t fs_e, ino_t inode_nr, off_t pos, unsigned int bytes);
int req_rename(endpoint_t fs_e, ino_t old_dir, char *old_name, ino_t new_dir,
	char *new_name);
int req_rmdir(endpoint_t fs_e, ino_t inode_nr, char *lastc);
int req_slink(endpoint_t fs_e, ino_t inode_nr, char *lastc, endpoint_t proc_e,
	vir_bytes path_addr, size_t path_length, uid_t uid, gid_t gid);
int req_stat(endpoint_t fs_e, ino_t inode_nr, endpoint_t proc_e, vir_bytes buf);
int req_sync(endpoint_t fs_e);
int req_unlink(endpoint_t fs_e, ino_t inode_nr, char *lastc);
int req_unmount(endpoint_t fs_e);
int req_utime(endpoint_t fs_e, ino_t inode_nr, struct timespec * actv,
	struct timespec * modtv);
int req_newdriver(endpoint_t fs_e, dev_t dev, char *label);

/* stadir.c */
int do_chdir(message *m_out);
int do_fchdir(message *m_out);
int do_chroot(message *m_out);
int do_fstat(message *m_out);
int do_stat(message *m_out);
int do_statvfs(message *m_out);
int do_fstatvfs(message *m_out);
int do_getvfsstat(message *m_out);
int do_rdlink(message *m_out);
int do_lstat(message *m_out);
int update_statvfs(struct vmnt *vmp, struct statvfs *buf);

/* time.c */
int do_utime(message *);
int do_utimens(message *);

/* tll.c */
void tll_downgrade(tll_t *tllp);
int tll_haspendinglock(tll_t *tllp);
void tll_init(tll_t *tllp);
int tll_islocked(tll_t *tllp);
int tll_lock(tll_t *tllp, tll_access_t locktype);
int tll_locked_by_me(tll_t *tllp);
void tll_lockstat(tll_t *tllp);
int tll_unlock(tll_t *tllp);
void tll_upgrade(tll_t *tllp);

/* utility.c */
struct timespec clock_timespec(void);
unsigned conv2(int norm, int w);
long conv4(int norm, long x);
int copy_name(size_t len, char *dest);
int fetch_name(vir_bytes path, size_t len, char *dest);
int no_sys(message *);
int isokendpt_f(char *f, int l, endpoint_t e, int *p, int ft);
int in_group(struct fproc *rfp, gid_t grp);
#define okendpt(e, p) isokendpt_f(__FILE__, __LINE__, (e), (p), 1)
#define isokendpt(e, p) isokendpt_f(__FILE__, __LINE__, (e), (p), 0)

/* vmnt.c */
void check_vmnt_locks(void);
void check_vmnt_locks_by_me(struct fproc *rfp);
void mark_vmnt_free(struct vmnt *vmp);
struct vmnt *get_free_vmnt(void);
struct vmnt *find_vmnt(endpoint_t fs_e);
struct vmnt *get_locked_vmnt(struct fproc *rfp);
void init_vmnts(void);
int lock_vmnt(struct vmnt *vp, tll_access_t locktype);
void unlock_vmnt(struct vmnt *vp);
void vmnt_unmap_by_endpt(endpoint_t proc_e);
void fetch_vmnt_paths(void);
void upgrade_vmnt_lock(struct vmnt *vmp);
void downgrade_vmnt_lock(struct vmnt *vmp);

/* vnode.c */
void check_vnode_locks(void);
void check_vnode_locks_by_me(struct fproc *rfp);
struct vnode *get_free_vnode(void);
struct vnode *find_vnode(int fs_e, ino_t inode);
void init_vnodes(void);
int is_vnode_locked(struct vnode *vp);
int lock_vnode(struct vnode *vp, tll_access_t locktype);
void unlock_vnode(struct vnode *vp);
void dup_vnode(struct vnode *vp);
void put_vnode(struct vnode *vp);
void vnode_clean_refs(struct vnode *vp);
void upgrade_vnode_lock(struct vnode *vp);

/* write.c */
int do_write(message *m_out);

/* gcov.c */
int do_gcov_flush(void);
#if ! USE_COVERAGE
#define do_gcov_flush no_sys
#endif

/* select.c */
int do_select(message *m_out);
void init_select(void);
void select_callback(struct filp *, int ops);
void select_forget(void);
void select_reply1(endpoint_t driver_e, int minor, int status);
void select_reply2(endpoint_t driver_e, int minor, int status);
void select_timeout_check(timer_t *);
void select_unsuspend_by_endpt(endpoint_t proc);

/* worker.c */
void worker_init(void);
int worker_available(void);
struct worker_thread *worker_get(thread_t worker_tid);
void worker_signal(struct worker_thread *worker);
int worker_can_start(struct fproc *rfp);
void worker_start(struct fproc *rfp, void (*func)(void), message *m_ptr,
	int use_spare);
void worker_stop(struct worker_thread *worker);
void worker_stop_by_endpt(endpoint_t proc_e);
void worker_wait(void);
struct worker_thread *worker_suspend(void);
void worker_resume(struct worker_thread *org_self);
void worker_set_proc(struct fproc *rfp);

#endif